Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 10
-rw-r--r-- drivers/gpu/drm/bochs/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 697
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 193
-rw-r--r-- drivers/gpu/drm/drm_crtc_internal.h | 38
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 30
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 63
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 211
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 346
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 110
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 81
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 485
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 79
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 24
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 282
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 52
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 621
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 272
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 208
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 69
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 174
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 371
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 43
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 49
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 40
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 581
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 32
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 227
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_smc.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 3
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/rgb.c | 11
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/svga3d_reg.h | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 35
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 18
79 files changed, 4416 insertions, 1550 deletions
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36c9ebc..32982da82694 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
{
struct armada_private *priv = dev->dev_private;
- /*
- * Yes, we really must jump through these hoops just to store a
- * _pointer_ to something into the kfifo. This is utterly insane
- * and idiotic, because it kfifo requires the _data_ pointed to by
- * the pointer const, not the pointer itself. Not only that, but
- * you have to pass a pointer _to_ the pointer you want stored.
- */
- const struct drm_framebuffer *silly_api_alert = fb;
- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
schedule_work(&priv->fb_unref_work);
}
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12019f0..5f8b0c2b9a44 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b1c2b278005c..16ca28ed5ee8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,12 +38,15 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include "drm_crtc_internal.h"
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented.
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
@@ -59,6 +62,8 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
+ *
+ * This function drops all modeset locks taken by drm_modeset_lock_all.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
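A minimal usage sketch of the locking pair documented above (the surrounding driver function is hypothetical; only the two helpers come from this file):

    static void example_touch_modeset_state(struct drm_device *dev)
    {
            drm_modeset_lock_all(dev);
            /* ... inspect or modify crtc/encoder/connector state ... */
            drm_modeset_unlock_all(dev);
    }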
@@ -74,6 +79,8 @@ EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
+ *
+ * Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
@@ -241,6 +248,15 @@ void drm_connector_ida_destroy(void)
ida_destroy(&drm_connector_enum_list[i].ida);
}
+/**
+ * drm_get_encoder_name - return a string for encoder
+ * @encoder: encoder to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
@@ -252,6 +268,15 @@ const char *drm_get_encoder_name(const struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_get_encoder_name);
+/**
+ * drm_get_connector_name - return a string for connector
+ * @connector: connector to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_connector_name(const struct drm_connector *connector)
{
static char buf[32];
@@ -263,6 +288,13 @@ const char *drm_get_connector_name(const struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_get_connector_name);
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one returns a
+ * const pointer and hence is threadsafe.
+ */
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
@@ -292,6 +324,15 @@ static char printable_char(int c)
return isascii(c) && isprint(c) ? c : '?';
}
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
const char *drm_get_format_name(uint32_t format)
{
static char buf[32];
@@ -316,14 +357,16 @@ EXPORT_SYMBOL(drm_get_format_name);
* @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
*
- * RETURNS:
+ * Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
*/
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
{
int ret;
@@ -347,10 +390,12 @@ static int drm_mode_object_get(struct drm_device *dev,
* @dev: DRM device
* @object: object to free
*
- * Free @id from @dev's unique identifier pool.
+ * Free @id from @dev's unique identifier pool. Note that despite the _get
+ * postfix modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
*/
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, object->id);
@@ -400,7 +445,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
* since all the fb attributes are invariant over its lifetime, no further
* locking but only correct reference counting is required.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
@@ -461,7 +506,7 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
*
* If successful, this grabs an additional reference to the framebuffer -
* callers need to make sure to eventually unreference the returned framebuffer
- * again.
+ * again, using drm_framebuffer_unreference().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
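A hedged sketch of the lookup/unreference pairing described above (the wrapper function is hypothetical; drm_framebuffer_lookup() and drm_framebuffer_unreference() are the documented calls):

    static bool example_fb_exists(struct drm_device *dev, uint32_t id)
    {
            struct drm_framebuffer *fb;

            fb = drm_framebuffer_lookup(dev, id);   /* grabs a reference */
            if (!fb)
                    return false;

            /* ... use fb ... */

            drm_framebuffer_unreference(fb);        /* drop the reference again */
            return true;
    }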
@@ -494,6 +539,8 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
/**
* drm_framebuffer_reference - incr the fb refcnt
* @fb: framebuffer
+ *
+ * This functions increments the fb's refcount.
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
@@ -550,8 +597,9 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * Cleanup references to a user-created framebuffer. This function is intended
- * to be used from the drivers ->destroy callback.
+ * Cleanup framebuffer. This function is intended to be used from the drivers
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
*
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -644,7 +692,7 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
*
* Inits a new object created as base part of a driver crtc object.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
@@ -720,20 +768,6 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_index);
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode
- * @mode: mode data
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_add_tail(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
@@ -758,7 +792,7 @@ static void drm_mode_remove(struct drm_connector *connector,
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_connector_init(struct drm_device *dev,
@@ -836,6 +870,14 @@ void drm_connector_cleanup(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_cleanup);
+/**
+ * drm_connector_unplug_all - unregister connector userspace interfaces
+ * @dev: drm device
+ *
+ * This function unregisters all connector userspace interfaces in sysfs. It
+ * should be called when the device is disconnected, e.g. from a USB driver's
+ * ->disconnect callback.
+ */
void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -847,6 +889,18 @@ void drm_connector_unplug_all(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_connector_unplug_all);
+/**
+ * drm_bridge_init - initialize a drm transcoder/bridge
+ * @dev: drm device
+ * @bridge: transcoder/bridge to set up
+ * @funcs: bridge function table
+ *
+ * Initialises a preallocated bridge. Bridges should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
@@ -870,6 +924,12 @@ int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
}
EXPORT_SYMBOL(drm_bridge_init);
+/**
+ * drm_bridge_cleanup - cleans up an initialised bridge
+ * @bridge: bridge to cleanup
+ *
+ * Cleans up the bridge but doesn't free the object.
+ */
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
@@ -882,6 +942,19 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_cleanup);
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder. Encoder should be
+ * subclassed as part of driver encoder objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@@ -909,6 +982,12 @@ int drm_encoder_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_encoder_init);
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -930,9 +1009,10 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
* @format_count: number of elements in @formats
* @priv: plane is private (hidden from userspace)?
*
- * Inits a new object created as base part of a driver plane object.
+ * Inits a preallocated plane object created as base part of a driver plane
+ * object.
*
- * RETURNS:
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
@@ -1033,50 +1113,6 @@ void drm_plane_force_disable(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_force_disable);
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
- struct drm_display_mode *nmode;
-
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
-
- if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
- kfree(nmode);
- return NULL;
- }
-
- return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
- if (!mode)
- return;
-
- drm_mode_object_put(dev, &mode->base);
-
- kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
@@ -1280,6 +1316,10 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
return 0;
}
+/*
+ * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
+ * the drm core's responsibility to set up mode control groups.
+ */
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
@@ -1356,7 +1396,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
@@ -1399,7 +1439,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
@@ -1584,7 +1624,7 @@ out:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
@@ -1653,7 +1693,7 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
@@ -1788,6 +1828,19 @@ out:
return ret;
}
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct an encoder configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1823,15 +1876,20 @@ out:
}
/**
- * drm_mode_getplane_res - get plane info
+ * drm_mode_getplane_res - enumerate all plane resources
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * Return an plane count and set of IDs.
+ * Construct a list of plane ids to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
struct drm_mode_config *config;
@@ -1869,16 +1927,20 @@ out:
}
/**
- * drm_mode_getplane - get plane info
+ * drm_mode_getplane - get plane configuration
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * Return plane info, including formats supported, gamma size, any
- * current fb, etc.
+ * Construct a plane configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
@@ -1934,16 +1996,19 @@ out:
}
/**
- * drm_mode_setplane - set up or tear down an plane
+ * drm_mode_setplane - configure a plane
* @dev: DRM device
* @data: ioctl data*
* @file_priv: DRM file info
*
- * Set plane info, including placement, fb, scaling, and other factors.
+ * Set plane configuration, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
@@ -2073,6 +2138,9 @@ out:
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is the correct refcounting dance.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
@@ -2157,7 +2225,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
@@ -2359,8 +2427,23 @@ out:
return ret;
}
+
+
+/**
+ * drm_mode_cursor_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+ void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_cursor2 new_req;
@@ -2371,6 +2454,21 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
return drm_mode_cursor_common(dev, &new_req, file_priv);
}
+/**
+ * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -2378,7 +2476,14 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
return drm_mode_cursor_common(dev, req, file_priv);
}
-/* Original addfb only supported RGB formats, so figure out which one */
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixels
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt;
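For example, the common 32 bpp / depth 24 fbdev configuration maps to the XRGB8888 fourcc (a sketch of the expected mapping for the legacy RGB formats handled here):

    uint32_t format = drm_mode_legacy_fb_format(32, 24);
    /* format == DRM_FORMAT_XRGB8888 */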
@@ -2420,11 +2525,12 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
- * Add a new FB to the specified CRTC, given a user request.
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
@@ -2597,11 +2703,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
- * Add a new FB to the specified CRTC, given a user request with format.
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
@@ -2661,7 +2769,7 @@ int drm_mode_addfb2(struct drm_device *dev,
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
@@ -2715,7 +2823,7 @@ fail_lookup:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
@@ -2759,6 +2867,25 @@ int drm_mode_getfb(struct drm_device *dev,
return ret;
}
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * USB display-link, MIPI manual update panels or eDP panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -2836,7 +2963,7 @@ out_err1:
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
@@ -2860,6 +2987,20 @@ void drm_fb_release(struct drm_file *priv)
mutex_unlock(&priv->fbs_lock);
}
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
@@ -2898,6 +3039,24 @@ fail:
}
EXPORT_SYMBOL(drm_property_create);
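A hedged sketch tying this together with drm_property_create_range() and drm_object_attach_property() documented below (the property name and value range are made up for illustration):

    static int example_attach_zpos_prop(struct drm_device *dev,
                                        struct drm_plane *plane)
    {
            struct drm_property *prop;

            prop = drm_property_create_range(dev, 0, "example-zpos", 0, 3);
            if (!prop)
                    return -ENOMEM;

            drm_object_attach_property(&plane->base, prop, 0);
            return 0;
    }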
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
@@ -2926,6 +3085,24 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
}
EXPORT_SYMBOL(drm_property_create_enum);
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Compared to plain enumeration properties userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
@@ -2954,6 +3131,24 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_property_create_bitmask);
+/**
+ * drm_property_create_range - create a new ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is allowed to set any integer value in the (min, max) range
+ * inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@@ -2973,6 +3168,21 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
}
EXPORT_SYMBOL(drm_property_create_range);
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
@@ -3012,6 +3222,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
}
EXPORT_SYMBOL(drm_property_add_enum);
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
@@ -3029,6 +3247,16 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3049,6 +3277,19 @@ void drm_object_attach_property(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_attach_property);
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. It only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
@@ -3065,6 +3306,20 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current property
+ * value this might be out of sync with the hardware, depending upon the driver
+ * and property.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
@@ -3081,6 +3336,19 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_get_value);
+/**
+ * drm_mode_getproperty_ioctl - get the current value of a connector's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for a connector's property.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3219,6 +3487,20 @@ static void drm_property_destroy_blob(struct drm_device *dev,
kfree(blob);
}
+/**
+ * drm_mode_getblob_ioctl - get the contents of a blob property value
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the contents of a blob property. The value stored in
+ * an object's blob property is just a normal modeset object id.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3253,6 +3535,17 @@ done:
return ret;
}
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
@@ -3310,6 +3603,20 @@ static bool drm_property_change_is_valid(struct drm_property *property,
}
}
+/**
+ * drm_mode_connector_property_set_ioctl - set the current value of a connector property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for a connector's property. It also
+ * calls into a driver's ->set_property callback to update the hardware state.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3376,6 +3683,21 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property. Compared
+ * to the connector specific ioctl this one is extended to also work on crtc and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -3432,6 +3754,22 @@ out:
return ret;
}
+/**
+ * drm_mode_obj_set_property_ioctl - set the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for an object's property. It also calls
+ * into a driver's ->set_property callback to update the hardware state.
+ * Compared to the connector specific ioctl this one is extended to also work on
+ * crtc and plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -3491,6 +3829,18 @@ out:
return ret;
}
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
@@ -3506,23 +3856,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
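Typical use during driver output setup, as a sketch assuming the encoder and connector are already allocated (the funcs tables are placeholders):

    drm_encoder_init(dev, encoder, &example_enc_funcs, DRM_MODE_ENCODER_TMDS);
    drm_connector_init(dev, connector, &example_conn_funcs,
                       DRM_MODE_CONNECTOR_HDMIA);
    drm_mode_connector_attach_encoder(connector, encoder);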
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
+ int gamma_size)
{
crtc->gamma_size = gamma_size;
@@ -3536,6 +3883,20 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
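Sketch of typical CRTC init usage (256 entries matches the common 8 bits per channel LUT; the right size is hardware specific and the funcs table is a placeholder):

    drm_crtc_init(dev, crtc, &example_crtc_funcs);
    drm_mode_crtc_set_gamma_size(crtc, 256);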
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3595,6 +3956,21 @@ out:
}
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3645,6 +4021,24 @@ out:
return ret;
}
+/**
+ * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This schedules an asynchronous update on a given CRTC, called page flip.
+ * Optionally a drm event is generated to signal the completion of the event.
+ * Generic drivers cannot assume that a pageflip with changed framebuffer
+ * properties (including driver specific metadata like tiling layout) will work,
+ * but some drivers support e.g. pixel format changes through the pageflip
+ * ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3757,6 +4151,14 @@ out:
return ret;
}
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This function calls all the crtc's, encoder's and connector's ->reset
+ * callbacks. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
@@ -3780,6 +4182,25 @@ void drm_mode_config_reset(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_config_reset);
+/**
+ * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3807,6 +4228,20 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_create(file_priv, dev, args);
}
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3819,6 +4254,21 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
+/**
+ * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -3830,9 +4280,14 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
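From the userspace side the three dumb buffer ioctls are used roughly like this (error handling elided; a sketch using the standard uapi structs, not code from any particular project):

    struct drm_mode_create_dumb creq = {
            .width = 1024, .height = 768, .bpp = 32,
    };
    struct drm_mode_map_dumb mreq = { 0 };
    struct drm_mode_destroy_dumb dreq = { 0 };

    ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
    mreq.handle = creq.handle;
    ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
    /* mmap() with mreq.offset now yields a CPU mapping of the buffer */
    dreq.handle = creq.handle;
    ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);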
-/*
- * Just need to support RGB formats here for compat with code that doesn't
- * use pixel formats directly yet.
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
*/
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
@@ -3904,7 +4359,7 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
@@ -3939,7 +4394,7 @@ EXPORT_SYMBOL(drm_format_num_planes);
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
- * RETURNS:
+ * Returns:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
@@ -3985,7 +4440,7 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
@@ -4020,7 +4475,7 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
- * RETURNS:
+ * Returns:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ea92b827e787..a85517854073 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -105,9 +105,6 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* @maxX: max width for modes
* @maxY: max height for modes
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
@@ -117,8 +114,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
- * RETURNS:
- * Number of modes found on @connector.
+ * Returns:
+ * The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
@@ -131,6 +128,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
int mode_flags = 0;
bool verbose_prune = true;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
@@ -176,8 +175,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_mode_connector_list_update(connector);
if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX,
- maxY, 0);
+ drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -219,18 +217,19 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Checks whether @encoder is used by any connector in the current mode setting
+ * output configuration. This doesn't mean that it is actually enabled since
+ * the DPMS state is tracked separately.
*
- * Walk @encoders's DRM device's mode_config and see if it's in use.
- *
- * RETURNS:
- * True if @encoder is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @encoder is used, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
@@ -242,19 +241,19 @@ EXPORT_SYMBOL(drm_helper_encoder_in_use);
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Checks whether @crtc is used by any connector in the current mode setting
+ * output configuration. This doesn't mean that it is actually enabled since
+ * the DPMS state is tracked separately.
*
- * Walk @crtc's DRM device's mode_config and see if it's in use.
- *
- * RETURNS:
- * True if @crtc is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @crtc is used, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- /* FIXME: Locking around list access? */
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
@@ -283,11 +282,11 @@ drm_encoder_disable(struct drm_encoder *encoder)
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
- * by calling its dpms function, which should power it off.
+ * This function walks through the entire mode setting configuration of @dev. It
+ * will remove any crtc links of unused encoders and encoder links of
+ * disconnected connectors. Then it will disable all unused encoders and crtcs
+ * either by calling their disable callback if available or by calling their
+ * dpms callback with DRM_MODE_DPMS_OFF.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
@@ -295,6 +294,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
struct drm_connector *connector;
struct drm_crtc *crtc;
+ drm_warn_on_modeset_not_all_locked(dev);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
@@ -355,9 +356,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
* @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
@@ -367,8 +365,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
*
- * RETURNS:
- * True if the mode was set successfully, or false otherwise.
+ * Returns:
+ * True if the mode was set successfully, false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
@@ -384,6 +382,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_encoder *encoder;
bool ret = true;
+ drm_warn_on_modeset_not_all_locked(dev);
+
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
@@ -560,17 +560,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Setup a new configuration, provided by the upper layers (either an ioctl call
* from userspace or internally e.g. from the fbdev support code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
*
- * RETURNS:
- * Returns 0 on success, -ERRNO on failure.
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -612,6 +609,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
+ drm_warn_on_modeset_not_all_locked(dev);
+
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
@@ -924,8 +923,16 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd2 *mode_cmd)
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a driver's fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
@@ -938,17 +945,36 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
-
- return 0;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
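Sketch of the usual pattern in a driver's ->fb_create implementation (allocation, funcs table and error handling are illustrative, not taken from any specific driver):

    struct drm_framebuffer *fb = kzalloc(sizeof(*fb), GFP_KERNEL);
    if (!fb)
            return ERR_PTR(-ENOMEM);

    drm_helper_mode_fill_fb_struct(fb, mode_cmd);
    ret = drm_framebuffer_init(dev, fb, &example_fb_funcs);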
-int drm_helper_resume_force_mode(struct drm_device *dev)
+/**
+ * drm_helper_resume_force_mode - force-restore mode setting configuration
+ * @dev: drm_device which should be restored
+ *
+ * Drivers which use the mode setting helpers can use this function to
+ * force-restore the mode setting configuration e.g. on resume or when something
+ * else might have trampled over the hw state (like some overzealous old BIOSen
+ * tended to do).
+ *
+ * This helper doesn't provide an error return value since restoring the old
+ * config should never fail due to resource allocation issues since the driver
+ * has successfully set the restored configuration already. Hence this should
+ * boil down to the equivalent of a few dpms on calls, which also don't provide
+ * an error code.
+ *
+ * Drivers where simply restoring an old configuration again might fail (e.g.
+ * due to slight differences in allocating shared resources when the
+ * configuration is restored in a different order than when userspace set it up)
+ * need to use their own restore logic.
+ */
+void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_crtc_helper_funcs *crtc_funcs;
- int ret, encoder_dpms;
+ int encoder_dpms;
+ bool ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -958,6 +984,7 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
+ /* Restoring the old config should never fail! */
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
@@ -980,12 +1007,28 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
drm_helper_choose_crtc_dpms(crtc));
}
}
+
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
- return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+/**
+ * drm_kms_helper_hotplug_event - fire off KMS hotplug events
+ * @dev: drm_device whose connector state changed
+ *
+ * This function fires off the uevent for userspace and also calls the
+ * output_poll_changed function, which is most commonly used to inform the fbdev
+ * emulation code and allow it to update the fbcon output configuration.
+ *
+ * Drivers should call this from their hotplug handling code when a change is
+ * detected. Note that this function does not do any output detection of its
+ * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
+ * driver already.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ */
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
@@ -1054,6 +1097,16 @@ static void output_poll_execute(struct work_struct *work)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
+/**
+ * drm_kms_helper_poll_disable - disable output polling
+ * @dev: drm_device
+ *
+ * This function disables the output polling work.
+ *
+ * Drivers can call this helper from their device suspend implementation. It is
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled.
+ */
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
@@ -1062,6 +1115,16 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+/**
+ * drm_kms_helper_poll_enable - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ */
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
@@ -1081,6 +1144,25 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
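The expected suspend/resume pairing, as a sketch (the hook names are hypothetical):

    static int example_suspend(struct drm_device *dev)
    {
            drm_kms_helper_poll_disable(dev);
            /* ... save state and power down ... */
            return 0;
    }

    static int example_resume(struct drm_device *dev)
    {
            /* ... power up and restore state ... */
            drm_kms_helper_poll_enable(dev);
            return 0;
    }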
+/**
+ * drm_kms_helper_poll_init - initialize and enable output polling
+ * @dev: drm_device
+ *
+ * This function initializes and then also enables output polling support for
+ * @dev. Drivers which do not have reliable hotplug support in hardware can use
+ * this helper infrastructure to regularly poll such connectors for changes in
+ * their connection state.
+ *
+ * Drivers can control which connectors are polled by setting the
+ * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
+ * connectors where probing live outputs can result in visual distortion, drivers
+ * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this.
+ * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
+ * completely ignored by the polling logic.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
@@ -1090,12 +1172,39 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
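Sketch of connector poll setup during driver init (which flags to set depends on the hardware's hotplug capabilities):

    /* e.g. a VGA output without a working hotplug interrupt */
    connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;

    /* after all connectors are set up */
    drm_kms_helper_poll_init(dev);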
+/**
+ * drm_kms_helper_poll_fini - disable output polling and clean it up
+ * @dev: drm_device
+ */
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+/**
+ * drm_helper_hpd_irq_event - hotplug processing
+ * @dev: drm_device
+ *
+ * Drivers can use this helper function to run a detect cycle on all connectors
+ * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
+ * other connectors are ignored, which is useful to avoid reprobing fixed
+ * panels.
+ *
+ * This helper function is useful for drivers which can't or don't track hotplug
+ * interrupts for each connector.
+ *
+ * Drivers which support hotplug interrupts for each connector individually and
+ * which have a more fine-grained detect logic should bypass this code and
+ * directly call drm_kms_helper_hotplug_event() in case the connector state
+ * changed.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
new file mode 100644
index 000000000000..a2945ee6d675
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm module as internal implementation details
+ * and are not exported to drivers.
+ */
+
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type);
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object);
+
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b924306b8477..4f4d3408a30a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1098,10 +1098,14 @@ EXPORT_SYMBOL(drm_edid_is_valid);
/**
* Get EDID information via I2C.
*
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
+ * @adapter: i2c device adapter
+ * @buf: EDID data buffer to be filled
+ * @block: 128 byte EDID block to start fetching from
+ * @len: EDID data buffer length to fetch
+ *
+ * Returns:
+ *
+ * 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
@@ -1243,9 +1247,11 @@ out:
/**
* Probe DDC presence.
+ * @adapter: i2c adapter to probe
+ *
+ * Returns:
*
- * \param adapter : i2c device adaptor
- * \return 1 on success
+ * 1 on success
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
@@ -1586,8 +1592,10 @@ bad_std_timing(u8 a, u8 b)
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector for the EDID block
+ * @edid: EDID block to scan
* @t: standard timing params
- * @timing_level: standard timing level
+ * @revision: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
@@ -2132,6 +2140,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
/**
* add_established_modes - get est. modes from EDID and add them
+ * @connector: connector for the EDID block
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
@@ -2194,6 +2203,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
/**
* add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector for the EDID block
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
@@ -3300,6 +3310,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_detect_monitor_audio - check monitor audio capability
+ * @edid: EDID block to scan
*
* Monitor should have CEA extension block.
* If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
@@ -3345,6 +3356,7 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ * @edid: EDID block to scan
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
@@ -3564,8 +3576,8 @@ void drm_set_preferred_mode(struct drm_connector *connector,
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
- if (drm_mode_width(mode) == hpref &&
- drm_mode_height(mode) == vpref)
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d99df15a78bc..060d25aa46fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1141,8 +1141,8 @@ struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *
struct drm_display_mode *mode;
list_for_each_entry(mode, &fb_connector->connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
+ if (mode->hdisplay > width ||
+ mode->vdisplay > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 154d6c6955c1..9909bef59800 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -85,9 +85,9 @@
#endif
/**
- * Initialize the GEM device fields
+ * drm_gem_init - Initialize the GEM device fields
+ * @dev: drm_device structure to initialize
*/
-
int
drm_gem_init(struct drm_device *dev)
{
@@ -120,6 +120,11 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
@@ -141,6 +146,11 @@ int drm_gem_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_object_init);
/**
+ * drm_gem_private_object_init - initialize an allocated private GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
@@ -176,6 +186,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
}
/**
+ * drm_gem_object_handle_free - release resources bound to userspace handles
+ * @obj: GEM object to clean up.
+ *
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -225,7 +238,12 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
}
/**
- * Removes the mapping from handle to filp for this object.
+ * drm_gem_handle_delete - deletes the given file-private handle
+ * @filp: drm file-private structure to use for the handle look up
+ * @handle: userspace handle to delete
+ *
+ * Removes the GEM handle from the @filp lookup table and if this is the last
+ * handle also cleans up linked resources like GEM names.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
@@ -270,6 +288,9 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ * @file: drm file-private structure to remove the dumb handle from
+ * @dev: corresponding drm_device
+ * @handle: the dumb handle to remove
*
* This implements the ->dumb_destroy kms driver callback for drivers which use
* gem to manage their backing storage.
@@ -284,6 +305,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
* drm_gem_handle_create_tail - internal functions to create a handle
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
@@ -336,6 +360,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
}
/**
+ * drm_gem_handle_create - create a gem handle for an object
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
@@ -536,6 +565,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
+ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Releases the handle to an mm object.
*/
int
@@ -554,6 +588,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
@@ -601,6 +640,11 @@ err:
}
/**
+ * drm_gem_open - implementation of the GEM_OPEN ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
@@ -640,6 +684,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_open - initializes GEM file-private structures at devnode open time
+ * @dev: drm_device which is being opened by userspace
+ * @file_private: drm file-private structure to set up
+ *
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
@@ -650,7 +698,7 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
spin_lock_init(&file_private->table_lock);
}
-/**
+/*
* Called at device close to release the file's
* handle references on objects.
*/
@@ -674,6 +722,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
}
/**
+ * drm_gem_release - release file-private GEM resources
+ * @dev: drm_device which is being closed by userspace
+ * @file_private: drm file-private structure to clean up
+ *
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
@@ -699,6 +751,9 @@ drm_gem_object_release(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_object_release);
/**
+ * drm_gem_object_free - free a GEM object
+ * @kref: kref of the object to free
+ *
* Called after the last reference to the object has been lost.
* Must be called holding struct_ mutex
*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index af93cc55259f..a2d45b748f86 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -47,7 +47,44 @@
#include <linux/seq_file.h>
#include <linux/export.h>
-#define MM_UNUSED_TARGET 4
+/**
+ * DOC: Overview
+ *
+ * drm_mm provides a simple range allocator. The drivers are free to use the
+ * resource allocator from the linux core if it suits them, the upside of drm_mm
+ * is that it's in the DRM core. Which means that it's easier to extend for
+ * some of the crazier special purpose needs of gpus.
+ *
+ * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
+ * Drivers are free to embed either of them into their own suitable
+ * datastructures. drm_mm itself will not do any allocations of its own, so if
+ * drivers choose not to embed nodes they need to still allocate them
+ * themselves.
+ *
+ * The range allocator also supports reservation of preallocated blocks. This is
+ * useful for taking over initial mode setting configurations from the firmware,
+ * where an object needs to be created which exactly matches the firmware's
+ * scanout target. As long as the range is still free it can be inserted anytime
+ * after the allocator is initialized, which helps with avoiding looped
+ * dependencies in the driver load sequence.
+ *
+ * drm_mm maintains a stack of most recently freed holes, which of all
+ * simplistic datastructures seems to be a fairly decent approach to clustering
+ * allocations and avoiding too much fragmentation. This means free space
+ * searches are O(num_holes). Given that all the fancy features drm_mm supports
+ * something better would be fairly complex and since gfx thrashing is a fairly
+ * steep cliff not a real concern. Removing a node again is O(1).
+ *
+ * drm_mm supports a few features: Alignment and range restrictions can be
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
+ * opaque unsigned long) which in conjunction with a driver callback can be used
+ * to implement sophisticated placement restrictions. The i915 DRM driver uses
+ * this to implement guard pages between incompatible caching domains in the
+ * graphics TT.
+ *
+ * Finally iteration helpers to walk all nodes and all holes are provided as are
+ * some basic allocator dumpers for debugging.
+ */
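A minimal sketch of the embedding pattern described in this overview, using a
hypothetical foo_buffer object:

	struct foo_buffer {
		struct drm_mm_node node;	/* embedded, no extra allocation */
		/* ... driver-specific members ... */
	};

	static int foo_buffer_alloc_space(struct drm_mm *mm,
					  struct foo_buffer *buf,
					  unsigned long size)
	{
		/* drm_mm requires the node to be zeroed before insertion */
		memset(&buf->node, 0, sizeof(buf->node));

		/* no alignment constraint, default color */
		return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
						  DRM_MM_SEARCH_DEFAULT);
	}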
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
@@ -107,6 +144,20 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
+/**
+ * drm_mm_reserve_node - insert a pre-initialized node
+ * @mm: drm_mm allocator to insert @node into
+ * @node: drm_mm_node to insert
+ *
+ * This function inserts an already set-up drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. This is useful
+ * to initialize the allocator with preallocated objects which must be set-up
+ * before the range allocator can be set-up, e.g. when taking over a firmware
+ * framebuffer.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no hole where @node is.
+ */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
@@ -148,9 +199,18 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
EXPORT_SYMBOL(drm_mm_reserve_node);
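For example, taking over the firmware scanout buffer at load time might look
roughly like this; fb_start and fb_size are assumed to come from the firmware
configuration:

	static int foo_reserve_firmware_fb(struct drm_mm *mm,
					   struct drm_mm_node *node,
					   unsigned long fb_start,
					   unsigned long fb_size)
	{
		/* start, size and color must be set up before reserving */
		memset(node, 0, sizeof(*node));
		node->start = fb_start;
		node->size = fb_size;

		/* fails with -ENOSPC if the range is no longer free */
		return drm_mm_reserve_node(mm, node);
	}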
/**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. The preallocated memory node
- * must be cleared.
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -222,9 +282,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
/**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. This is for range
- * restricted allocations. The preallocated memory node must be cleared.
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
@@ -247,7 +318,12 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
/**
- * Remove a memory node from the allocator.
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on an unallocated node.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
@@ -384,7 +460,13 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
}
/**
- * Moves an allocation. To be used with embedded struct drm_mm_node.
+ * drm_mm_replace_node - move an allocation from @old to @new
+ * @old: drm_mm_node to remove from the allocator
+ * @new: drm_mm_node which should inherit @old's allocation
+ *
+ * This is useful for when drivers embed the drm_mm_node structure and hence
+ * can't move allocations by reassigning pointers. It's a combination of remove
+ * and insert with the guarantee that the allocation start will match.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
@@ -402,12 +484,46 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
EXPORT_SYMBOL(drm_mm_replace_node);
/**
- * Initializa lru scanning.
+ * DOC: lru scan roster
+ *
+ * Very often GPUs need to have contiguous allocations for a given object. When
+ * evicting objects to make space for a new one it is therefore not very
+ * efficient to simply start selecting all objects from the tail of an LRU
+ * until there's a suitable hole: especially for big objects or nodes that
+ * otherwise have special allocation constraints there's a good chance we evict
+ * lots of (smaller) objects unnecessarily.
+ *
+ * The DRM range allocator supports this use-case through the scanning
+ * interfaces. First a scan operation needs to be initialized with
+ * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
+ * objects to the roster (probably by walking an LRU list, but this can be
+ * freely implemented) until a suitable hole is found or there's no further
+ * evictable object.
+ *
+ * Then the driver must walk through all objects again in exactly the reverse
+ * order to restore the allocator state. Note that while the allocator is used
+ * in the scan mode no other operation is allowed.
+ *
+ * Finally the driver evicts all objects selected in the scan. Adding and
+ * removing an object is O(1), and since freeing a node is also O(1) the overall
+ * complexity is O(scanned_objects). So like the free stack which needs to be
+ * walked before a scan operation even begins this is linear in the number of
+ * objects. It doesn't seem to hurt badly.
+ */
+
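A sketch of that scan flow, with hypothetical foo_bo objects linked on an LRU
list (restoring the exact LRU order of survivors is simplified here):

	struct foo_bo {
		struct drm_mm_node node;
		struct list_head lru_link;
	};

	static bool foo_evict_something(struct drm_mm *mm, struct list_head *lru,
					unsigned long size, unsigned alignment)
	{
		struct foo_bo *bo, *tmp;
		LIST_HEAD(scanned);
		LIST_HEAD(evict_list);
		bool found = false;

		drm_mm_init_scan(mm, size, alignment, 0 /* color */);

		/* add objects from the LRU until a suitable hole is found */
		list_for_each_entry_safe(bo, tmp, lru, lru_link) {
			list_move(&bo->lru_link, &scanned);
			if (drm_mm_scan_add_block(&bo->node)) {
				found = true;
				break;
			}
		}

		/*
		 * Unwind: list_move() built @scanned in reverse scan order, so
		 * walking it forward removes blocks in exactly reverse order.
		 */
		list_for_each_entry_safe(bo, tmp, &scanned, lru_link) {
			if (drm_mm_scan_remove_block(&bo->node))
				list_move(&bo->lru_link, &evict_list);
			else
				list_move(&bo->lru_link, lru);
		}

		/* only now, with the scan unwound, may nodes be freed */
		list_for_each_entry_safe(bo, tmp, &evict_list, lru_link)
			foo_bo_evict(bo); /* hypothetical: unbind + drm_mm_remove_node() */

		return found;
	}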
+/**
+ * drm_mm_init_scan - initialize lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
*
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
@@ -427,12 +543,20 @@ void drm_mm_init_scan(struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_init_scan);
/**
- * Initializa lru scanning.
+ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @start: start of the allowed range for the allocation
+ * @end: end of the allowed range for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole. This version is for range-restricted scans.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
*
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
@@ -456,12 +580,16 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
+ * drm_mm_scan_add_block - add a node to the scan list
+ * @node: drm_mm_node to add
+ *
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
- * Returns non-zero, if a hole has been found, zero otherwise.
+ * Returns:
+ * True if a hole has been found, false otherwise.
*/
-int drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
@@ -501,15 +629,16 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_end = hole_end;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
- * Remove a node from the scan list.
+ * drm_mm_scan_remove_block - remove a node from the scan list
+ * @node: drm_mm_node to remove
*
* Nodes _must_ be removed in the exact same order from the scan list as they
* have been added, otherwise the internal state of the memory manager will be
@@ -519,10 +648,11 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
* return the just freed block (because its at the top of the free_stack list).
*
- * Returns one if this block should be evicted, zero otherwise. Will always
- * return zero when no hole has been found.
+ * Returns:
+ * True if this block should be evicted, false otherwise. Will always
+ * return false when no hole has been found.
*/
-int drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
@@ -543,7 +673,15 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
-int drm_mm_clean(struct drm_mm * mm)
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+bool drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->head_node.node_list;
@@ -551,6 +689,14 @@ int drm_mm_clean(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_clean);
+/**
+ * drm_mm_init - initialize a drm-mm allocator
+ * @mm: the drm_mm structure to initialize
+ * @start: start of the range managed by @mm
+ * @size: size of the range managed by @mm
+ *
+ * Note that @mm must be cleared to 0 before calling this function.
+ */
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
@@ -572,6 +718,13 @@ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
}
EXPORT_SYMBOL(drm_mm_init);
+/**
+ * drm_mm_takedown - clean up a drm_mm allocator
+ * @mm: drm_mm allocator to clean up
+ *
+ * Note that it is a bug to call this function on an allocator which is not
+ * clean.
+ */
void drm_mm_takedown(struct drm_mm * mm)
{
WARN(!list_empty(&mm->head_node.node_list),
@@ -597,6 +750,11 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
return 0;
}
+/**
+ * drm_mm_debug_table - dump allocator state to dmesg
+ * @mm: drm_mm allocator to dump
+ * @prefix: prefix to use for dumping to dmesg
+ */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
@@ -635,6 +793,11 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
return 0;
}
+/**
+ * drm_mm_dump_table - dump allocator state to a seq_file
+ * @m: seq_file to dump to
+ * @mm: drm_mm allocator to dump
+ */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0733153dfd2..8b410576fce4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -37,15 +37,14 @@
#include <drm/drm_crtc.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_modes.h>
+
+#include "drm_crtc_internal.h"
/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @dev: DRM device
+ * drm_mode_debug_printmodeline - print a mode to dmesg
* @mode: mode to print
*
- * LOCKING:
- * None.
- *
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
@@ -61,18 +60,77 @@ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
- * drm_cvt_mode -create a modeline based on CVT algorithm
+ * drm_mode_create - create a new display mode
* @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh : vrefresh rate
- * @reduced : Whether the GTF calculation is simplified
- * @interlaced:Whether the interlace is supported
*
- * LOCKING:
- * none.
+ * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
+ * and return it.
*
- * return the modeline based on CVT algorithm
+ * Returns:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode);
+ return NULL;
+ }
+
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * Release @mode's unique ID, then free the @mode structure itself using kfree.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed_modes list
+ * @connector: connector the new mode is added to
+ * @mode: mode data
+ *
+ * Add @mode to @connector's probed_modes list for later use. This list should
+ * then, in a second step, be filtered, with all the modes actually supported
+ * by the hardware moved to @connector's modes list.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
+ list_add_tail(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
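For example, a fixed-panel connector's ->get_modes() hook might duplicate a
stored mode and add it to the probed list; foo_panel_mode is a hypothetical
driver-provided template:

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct drm_display_mode *mode;

		/* called by the probe helpers with mode_config.mutex held */
		mode = drm_mode_duplicate(connector->dev, &foo_panel_mode);
		if (!mode)
			return 0;

		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);

		return 1;	/* number of modes added */
	}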
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: whether to add margins (borders)
*
* This function is called to generate the modeline based on CVT algorithm
* according to the hdisplay, vdisplay, vrefresh.
@@ -82,12 +140,17 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
*
* And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
* What I have done is to translate it by using integer calculation.
+ *
+ * Returns:
+ * The modeline based on the CVT algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
-#define HV_FACTOR 1000
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins)
{
+#define HV_FACTOR 1000
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
@@ -281,23 +344,25 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
EXPORT_SYMBOL(drm_cvt_mode);
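As a usage sketch, generating a 1920x1080@60 reduced-blanking CVT mode for a
probed connector could look like this (foo_add_cvt_mode is hypothetical):

	static void foo_add_cvt_mode(struct drm_connector *connector)
	{
		struct drm_display_mode *mode;

		mode = drm_cvt_mode(connector->dev, 1920, 1080, 60,
				    true,	/* reduced blanking */
				    false,	/* progressive */
				    false);	/* no margins */
		if (mode)
			drm_mode_probed_add(connector, mode);
	}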
/**
- * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
- *
- * @dev :drm device
- * @hdisplay :hdisplay size
- * @vdisplay :vdisplay size
- * @vrefresh :vrefresh rate.
- * @interlaced :whether the interlace is supported
- * @margins :desired margin size
- * @GTF_[MCKJ] :extended GTF formula parameters
- *
- * LOCKING.
- * none.
- *
- * return the modeline based on full GTF algorithm.
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
+ * @GTF_M: extended GTF formula parameters
+ * @GTF_2C: extended GTF formula parameters
+ * @GTF_K: extended GTF formula parameters
+ * @GTF_2J: extended GTF formula parameters
*
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
+ *
+ * Returns:
+ * The modeline based on the full GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
@@ -467,17 +532,13 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
EXPORT_SYMBOL(drm_gtf_mode_complex);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev :drm device
- * @hdisplay :hdisplay size
- * @vdisplay :vdisplay size
- * @vrefresh :vrefresh rate.
- * @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
- *
- * LOCKING.
- * none.
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
*
* return the modeline based on GTF algorithm
*
@@ -496,19 +557,32 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* C = 40
* K = 128
* J = 20
+ *
+ * Returns:
+ * The modeline based on the GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
- bool lace, int margins)
+ bool interlaced, int margins)
{
- return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
- margins, 600, 40 * 2, 128, 20 * 2);
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
+ interlaced, margins,
+ 600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
#ifdef CONFIG_VIDEOMODE_HELPERS
-int drm_display_mode_from_videomode(const struct videomode *vm,
- struct drm_display_mode *dmode)
+/**
+ * drm_display_mode_from_videomode - fill in @dmode using @vm
+ * @vm: videomode structure to use as source
+ * @dmode: drm_display_mode structure to use as destination
+ *
+ * Fills out @dmode using the display mode specified in @vm.
+ */
+void drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
@@ -538,8 +612,6 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
dmode->flags |= DRM_MODE_FLAG_DBLCLK;
drm_mode_set_name(dmode);
-
- return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
@@ -553,6 +625,9 @@ EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
* This function is expensive and should only be used, if only one mode is to be
* read from DT. To get multiple modes start with of_get_display_timings and
* work with that instead.
+ *
+ * Returns:
+ * 0 on success, a negative errno code when no OF videomode node was found.
*/
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, int index)
@@ -580,10 +655,8 @@ EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
- * LOCKING:
- * None.
- *
- * Set the name of @mode to a standard format.
+ * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
+ * with an optional 'i' suffix for interlaced modes.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
@@ -595,54 +668,12 @@ void drm_mode_set_name(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_set_name);
-/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->hdisplay
- */
-int drm_mode_width(const struct drm_display_mode *mode)
-{
- return mode->hdisplay;
-
-}
-EXPORT_SYMBOL(drm_mode_width);
-
-/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
- *
- * RETURNS:
- * @mode->vdisplay
- */
-int drm_mode_height(const struct drm_display_mode *mode)
-{
- return mode->vdisplay;
-}
-EXPORT_SYMBOL(drm_mode_height);
-
/** drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ * Returns:
+ * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
int drm_mode_hsync(const struct drm_display_mode *mode)
{
@@ -666,17 +697,9 @@ EXPORT_SYMBOL(drm_mode_hsync);
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate. It will be the result of actual value plus 0.5.
- * If it is 70.288, it will return 70Hz.
- * If it is 59.6, it will return 60Hz.
+ * Returns:
+ * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
@@ -705,14 +728,11 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
* @adjust_flags: a combination of adjustment flags
*
- * LOCKING:
- * None.
- *
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
@@ -780,15 +800,11 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
-
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
- * LOCKING:
- * None.
- *
* Copy an existing mode into another mode, preserving the object id and
* list head of the destination mode.
*/
@@ -805,13 +821,14 @@ EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
- * @m: mode to duplicate
- *
- * LOCKING:
- * None.
+ * @dev: drm_device to allocate the duplicated mode for
+ * @mode: mode to duplicate
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
+ *
+ * Returns:
+ * Pointer to duplicated mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -833,12 +850,9 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* @mode1: first mode
* @mode2: second mode
*
- * LOCKING:
- * None.
- *
* Check to see if @mode1 and @mode2 are equivalent.
*
- * RETURNS:
+ * Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
@@ -864,13 +878,10 @@ EXPORT_SYMBOL(drm_mode_equal);
* @mode1: first mode
* @mode2: second mode
*
- * LOCKING:
- * None.
- *
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks nor the stereo layout.
*
- * RETURNS:
+ * Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
@@ -900,25 +911,19 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
- * @maxPitch: max pitch
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
+ * This function is a helper which can be used to validate modes against size
+ * limitations of the DRM device/connector. If a mode is too big, its status
+ * member is updated with the appropriate validation failure code. The list
+ * itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
+ int maxX, int maxY)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
@@ -934,12 +939,10 @@ EXPORT_SYMBOL(drm_mode_validate_size);
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
+ * This helper function can be used to prune a display mode list after
+ * validation has been completed. All modes whose status is not MODE_OK will be
+ * removed from the list, and if @verbose is set the status code and mode name
+ * are also printed to dmesg.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
@@ -966,13 +969,10 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
- * LOCKING:
- * None.
- *
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
- * RETURNS:
+ * Returns:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
@@ -1000,12 +1000,9 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
/**
* drm_mode_sort - sort mode list
- * @mode_list: list to sort
+ * @mode_list: list of drm_display_mode structures to sort
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
+ * Sort @mode_list by favorability, moving good modes to the head of the list.
*/
void drm_mode_sort(struct list_head *mode_list)
{
@@ -1017,13 +1014,12 @@ EXPORT_SYMBOL(drm_mode_sort);
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
- * list and only adds different modes. All modes unverified after this point
- * will be removed by the prune invalid modes.
+ * list and only adds different/new modes.
+ *
+ * This is just a helper function; it doesn't validate any modes itself and also
+ * doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
{
@@ -1031,6 +1027,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
struct drm_display_mode *pmode, *pt;
int found_it;
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
@@ -1056,17 +1054,25 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
EXPORT_SYMBOL(drm_mode_connector_list_update);
/**
- * drm_mode_parse_command_line_for_connector - parse command line for connector
- * @mode_option - per connector mode option
- * @connector - connector to parse line for
+ * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
+ * @mode_option: optional per connector mode option
+ * @connector: connector to parse modeline for
+ * @mode: preallocated drm_cmdline_mode structure to fill out
+ *
+ * This parses @mode_option command line modeline for modes and options to
+ * configure the connector. If @mode_option is NULL the default command line
+ * modeline in fb_mode_option will be parsed instead.
*
- * This parses the connector specific then generic command lines for
- * modes and options to configure the connector.
+ * This uses the same parameters as the fb modedb.c, except for an extra
+ * force-enable, force-enable-digital and force-disable bit at the end:
*
- * This uses the same parameters as the fb modedb.c, except for extra
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
- * enable/enable Digital/disable bit at the end
+ * The intermediate drm_cmdline_mode structure is required to store additional
+ * options from the command line modeline like the force-enable/disable flag.
+ *
+ * Returns:
+ * True if a valid modeline has been parsed, false otherwise.
*/
bool drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_connector *connector,
@@ -1219,6 +1225,14 @@ done:
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
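A sketch of the typical two-step use at driver load time; the option string
handling and the foo_ name are illustrative only:

	static struct drm_display_mode *
	foo_parse_user_mode(struct drm_device *dev,
			    struct drm_connector *connector,
			    const char *option)
	{
		struct drm_cmdline_mode cmdline;

		/* e.g. option = "1024x768@60e" forces a 1024x768 60 Hz mode */
		if (!drm_mode_parse_command_line_for_connector(option, connector,
							       &cmdline))
			return NULL; /* no valid modeline, fall back to probing */

		return drm_mode_create_from_cmdline_mode(dev, &cmdline);
	}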
+/**
+ * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
+ * @dev: DRM device to create the new mode for
+ * @cmd: input command line modeline
+ *
+ * Returns:
+ * Pointer to converted mode on success, NULL on error.
+ */
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 56805c39c906..f1437b6c8dbf 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,7 +68,8 @@ struct drm_prime_attachment {
enum dma_data_direction dir;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -174,7 +175,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
@@ -211,11 +212,19 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt, enum dma_data_direction dir)
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
/* nothing to be done here */
}
+/**
+ * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * @dma_buf: buffer to be released
+ *
+ * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
+ * must use this in their dma_buf ops structure as the release callback.
+ */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -242,30 +251,30 @@ static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
}
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
+ unsigned long page_num)
{
return NULL;
}
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+ unsigned long page_num, void *addr)
{
}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
+ unsigned long page_num)
{
return NULL;
}
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+ unsigned long page_num, void *addr)
{
}
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
@@ -315,6 +324,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* driver's scatter/gather table
*/
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @dev: drm_device to export from
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC
+ *
+ * This is the implementation of the gem_prime_export function for GEM drivers
+ * using the PRIME helpers.
+ */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
@@ -355,9 +373,23 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which must be used by GEM
+ * drivers to ensure correct lifetime management of the underlying GEM object.
+ * The actual exporting from GEM object to a dma-buf is done through the
+ * gem_prime_export driver callback.
+ */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
- int *prime_fd)
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -441,6 +473,14 @@ out_unlock:
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
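In practice a GEM driver wires these helpers up in its struct drm_driver
roughly as follows; only the PRIME-related fields are shown and the foo_
prefix is hypothetical:

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
		/* mandatory PRIME entry points, from the helper library */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_prime_export,
		.gem_prime_import	= drm_gem_prime_import,
		/* ... plus driver callbacks such as gem_prime_get_sg_table ... */
	};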
+/**
+ * drm_gem_prime_import - helper library implementation of the import callback
+ * @dev: drm_device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * This is the implementation of the gem_prime_import function for GEM drivers
+ * using the PRIME helpers.
+ */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
@@ -496,8 +536,21 @@ fail_detach:
}
EXPORT_SYMBOL(drm_gem_prime_import);
+/**
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
+ *
+ * This is the PRIME import function which must be used by GEM drivers to
+ * ensure correct lifetime management of the underlying GEM object. The actual
+ * importing of the GEM object from the dma-buf is done through the
+ * gem_prime_import driver callback.
+ */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -598,12 +651,14 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
args->fd, &args->handle);
}
-/*
- * drm_prime_pages_to_sg
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
*
- * this helper creates an sg table object from a set of pages
+ * This helper creates an sg table object from a set of pages
* the driver is responsible for mapping the pages into the
- * importers address space
+ * importer's address space for use with dma_buf itself.
*/
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
@@ -628,9 +683,16 @@ out:
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
-/* export an sg table into an array of pages and addresses
- this is currently required by the TTM driver in order to do correct fault
- handling */
+/**
+ * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the page array in
+ * @addrs: optional array to store the dma bus address of each page
+ * @max_pages: size of both the passed-in arrays
+ *
+ * Exports an sg table into an array of pages and addresses. This is currently
+ * required by the TTM driver in order to do correct fault handling.
+ */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages)
{
@@ -663,7 +725,15 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
-/* helper function to cleanup a GEM/prime object */
+
+/**
+ * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
+ * @obj: GEM object which was created from a dma-buf
+ * @sg: the sg-table which was pinned at import time
+ *
+ * This is the cleanup function which GEM drivers need to call when they use
+ * drm_gem_prime_import() to import dma-bufs.
+ */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
struct dma_buf_attachment *attach;
@@ -683,11 +753,9 @@ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
INIT_LIST_HEAD(&prime_fpriv->head);
mutex_init(&prime_fpriv->lock);
}
-EXPORT_SYMBOL(drm_prime_init_file_private);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
/* by now drm_gem_release should've made sure the list is empty */
WARN_ON(!list_empty(&prime_fpriv->head));
}
-EXPORT_SYMBOL(drm_prime_destroy_file_private);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f33902ff2c22..b1445b73465b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,58 +3,69 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
- i915_gpu_error.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+ i915_params.o \
i915_suspend.o \
- i915_gem.o \
+ i915_sysfs.o \
+ intel_pm.o
+i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
i915_gem_context.o \
i915_gem_debug.o \
+ i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
- i915_params.o \
- i915_sysfs.o \
+ i915_gpu_error.o \
+ i915_irq.o \
i915_trace_points.o \
- i915_ums.o \
+ intel_ringbuffer.o \
+ intel_uncore.o
+
+# modesetting core code
+i915-y += intel_bios.o \
intel_display.o \
- intel_crt.o \
- intel_lvds.o \
- intel_dsi.o \
- intel_dsi_cmd.o \
- intel_dsi_pll.o \
- intel_bios.o \
- intel_ddi.o \
- intel_dp.o \
- intel_hdmi.o \
- intel_sdvo.o \
intel_modes.o \
- intel_panel.o \
- intel_pm.o \
- intel_i2c.o \
- intel_tv.o \
- intel_dvo.o \
- intel_ringbuffer.o \
intel_overlay.o \
- intel_sprite.o \
intel_sideband.o \
- intel_uncore.o \
+ intel_sprite.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
dvo_ch7xxx.o \
- dvo_ch7017.o \
dvo_ivch.o \
- dvo_tfp410.o \
- dvo_sil164.o \
dvo_ns2501.o \
- i915_gem_dmabuf.o
-
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+ dvo_sil164.o \
+ dvo_tfp410.o \
+ intel_crt.o \
+ intel_ddi.o \
+ intel_dp.o \
+ intel_dsi_cmd.o \
+ intel_dsi.o \
+ intel_dsi_pll.o \
+ intel_dvo.o \
+ intel_hdmi.o \
+ intel_i2c.o \
+ intel_lvds.o \
+ intel_panel.o \
+ intel_sdvo.o \
+ intel_tv.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+# legacy horrors
+i915-y += i915_dma.o \
+ i915_ums.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 000000000000..7a5756e9bb86
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: i915 batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware, if the parser determines
+ * the operation is safe, and submits the batch as "secure" to prevent hardware
+ * parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for both
+ * normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (e.g. GGTT, HWS page, etc.).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses in
+ * scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain only
+ * those commands required by the parser. This generally works because command
+ * opcode ranges have standard command length encodings. So for commands that
+ * the parser does not need to check, it can easily skip them. This is
+ * implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the standard
+ * length encoding for their opcode range, primarily amongst the MI_* commands.
+ * To handle this, the parser provides a way to define explicit "skip" entries
+ * in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks, via a
+ * general bitmasking mechanism.
+ */
+
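+/*
+ * Illustrative sketch only (not one of the driver's real table entries):
+ * a descriptor that unconditionally rejects a hypothetical MI command with
+ * opcode 0x23 could be declared as
+ *
+ *	static const struct drm_i915_cmd_descriptor reject_example = {
+ *		.flags = CMD_DESC_REJECT,
+ *		.cmd = { .value = 0x23 << 23, .mask = 0x3f << 23 },
+ *		.length = { .mask = 0x3f },
+ *	};
+ *
+ * The cmd.mask selects the MI opcode bits (28:23), so any dword whose opcode
+ * matches cmd.value is caught regardless of its length field.
+ */
+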
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+{
+ int i;
+
+ if (!ring->cmd_tables || ring->cmd_table_count == 0)
+ return;
+
+ for (i = 0; i < ring->cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+ u32 previous = 0;
+ int j;
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+				&table->table[j];
+ u32 curr = desc->cmd.value & desc->cmd.mask;
+
+ if (curr < previous)
+ DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ ring->id, i, j, curr, previous);
+
+ previous = curr;
+ }
+ }
+}
+
+static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+{
+ int i;
+ u32 previous = 0;
+
+ for (i = 0; i < reg_count; i++) {
+ u32 curr = reg_table[i];
+
+ if (curr < previous)
+ DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+ ring_id, i, curr, previous);
+
+ previous = curr;
+ }
+}
+
+static void validate_regs_sorted(struct intel_ring_buffer *ring)
+{
+ check_sorted(ring->id, ring->reg_table, ring->reg_count);
+ check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_ring_buffer based on whether the platform requires software
+ * command parsing.
+ */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+{
+ if (!IS_GEN7(ring->dev))
+ return;
+
+ switch (ring->id) {
+ case RCS:
+ ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ break;
+ case VCS:
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ case BCS:
+ ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ break;
+ case VECS:
+ /* VECS can use the same length_mask function as VCS */
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ default:
+ DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+ ring->id);
+ BUG();
+ }
+
+ validate_cmds_sorted(ring);
+ validate_regs_sorted(ring);
+}
+
+static const struct drm_i915_cmd_descriptor*
+find_cmd_in_table(const struct drm_i915_cmd_table *table,
+ u32 cmd_header)
+{
+ int i;
+
+ for (i = 0; i < table->count; i++) {
+ const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+ u32 masked_cmd = desc->cmd.mask & cmd_header;
+ u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+ if (masked_cmd == masked_value)
+ return desc;
+ }
+
+ return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor*
+find_cmd(struct intel_ring_buffer *ring,
+ u32 cmd_header,
+ struct drm_i915_cmd_descriptor *default_desc)
+{
+ u32 mask;
+ int i;
+
+ for (i = 0; i < ring->cmd_table_count; i++) {
+ const struct drm_i915_cmd_descriptor *desc;
+
+ desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
+ if (desc)
+ return desc;
+ }
+
+ mask = ring->get_cmd_length_mask(cmd_header);
+ if (!mask)
+ return NULL;
+
+ BUG_ON(!default_desc);
+ default_desc->flags = CMD_DESC_SKIP;
+ default_desc->length.mask = mask;
+
+ return default_desc;
+}
+
+static bool valid_reg(const u32 *table, int count, u32 addr)
+{
+ if (table && count != 0) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (table[i] == addr)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+{
+ int i;
+ void *addr = NULL;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ goto finish;
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ i++;
+ }
+
+ addr = vmap(pages, i, 0, PAGE_KERNEL);
+ if (addr == NULL) {
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+ goto finish;
+ }
+
+finish:
+ if (pages)
+ drm_free_large(pages);
+	return (u32 *)addr;
+}
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when enabled via module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+{
+ /* No command tables indicates a platform without parsing */
+ if (!ring->cmd_tables)
+ return false;
+
+ return (i915.enable_cmd_parser == 1);
+}
+
+#define LENGTH_BIAS 2
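+/*
+ * Worked example (sketch): for MI commands the length field encodes
+ * (total dwords - 2), so a decoded length field of 2 means the command
+ * occupies 2 + LENGTH_BIAS = 4 dwords, header included.
+ */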
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ *
+ * Return: non-zero if the parser finds violations or otherwise fails
+ */
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master)
+{
+ int ret = 0;
+ u32 *cmd, *batch_base, *batch_end;
+ struct drm_i915_cmd_descriptor default_desc = { 0 };
+ int needs_clflush = 0;
+
+ ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ret;
+ }
+
+ batch_base = vmap_batch(batch_obj);
+ if (!batch_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ i915_gem_object_unpin_pages(batch_obj);
+ return -ENOMEM;
+ }
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
+
+ cmd = batch_base + (batch_start_offset / sizeof(*cmd));
+	batch_end = batch_base + (batch_obj->base.size / sizeof(*batch_end));
+
+ while (cmd < batch_end) {
+ const struct drm_i915_cmd_descriptor *desc;
+ u32 length;
+
+ if (*cmd == MI_BATCH_BUFFER_END)
+ break;
+
+ desc = find_cmd(ring, *cmd, &default_desc);
+ if (!desc) {
+ DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_FIXED)
+ length = desc->length.fixed;
+ else
+ length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+
+ if ((batch_end - cmd) < length) {
+ DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n",
+ *cmd,
+ length,
+ batch_end - cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_REJECT) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+ DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_REGISTER) {
+ u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+ if (!valid_reg(ring->reg_table,
+ ring->reg_count, reg_addr)) {
+ if (!is_master ||
+ !valid_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr,
+ *cmd,
+ ring->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (desc->flags & CMD_DESC_BITMASK) {
+ int i;
+
+ for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+ u32 dword;
+
+ if (desc->bits[i].mask == 0)
+ break;
+
+ dword = cmd[desc->bits[i].offset] &
+ desc->bits[i].mask;
+
+ if (dword != desc->bits[i].expected) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, ring->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret)
+ break;
+ }
+
+ cmd += length;
+ }
+
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+ }
+
+ vunmap(batch_base);
+
+ i915_gem_object_unpin_pages(batch_obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d90a70744d93..a90d31c78643 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -602,7 +602,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
- int i;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -615,16 +614,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i, I915_READ(GEN8_GT_IER(i)));
}
- for_each_pipe(i) {
+ for_each_pipe(pipe) {
seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IMR(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IMR(pipe)));
seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IIR(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IIR(pipe)));
seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(i),
- I915_READ(GEN8_DE_PIPE_IER(i)));
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IER(pipe)));
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -1348,6 +1347,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
if (intel_fbc_enabled(dev)) {
seq_puts(m, "FBC enabled\n");
} else {
@@ -1391,6 +1392,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
seq_putc(m, '\n');
}
+
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -1405,11 +1409,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
seq_puts(m, "enabled\n");
else
seq_puts(m, "disabled\n");
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -1420,6 +1428,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
+ intel_runtime_pm_get(dev_priv);
+
if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
@@ -1429,6 +1439,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "self-refresh: %s\n",
sr_enabled ? "enabled" : "disabled");
@@ -1468,7 +1480,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
@@ -1476,12 +1488,13 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
+ goto out;
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1498,10 +1511,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
- intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
- return 0;
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
static int i915_gfxec(struct seq_file *m, void *unused)
@@ -1757,7 +1771,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
return;
seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
- seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+ seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for (i = 0; i < 4; i++) {
@@ -1972,12 +1986,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
+ intel_runtime_pm_get(dev_priv);
+
rdmsrl(MSR_RAPL_POWER_UNIT, power);
power = (power & 0x1f00) >> 8;
units = 1000000 / (1 << power); /* convert to uJ */
power = I915_READ(MCH_SECP_NRG_STTS);
power *= units;
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "%llu", (long long unsigned)power);
return 0;
@@ -1997,7 +2015,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
mutex_lock(&dev_priv->pc8.lock);
seq_printf(m, "Requirements met: %s\n",
yesno(dev_priv->pc8.requirements_met));
- seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
seq_printf(m, "IRQs disabled: %s\n",
yesno(dev_priv->pc8.irqs_disabled));
@@ -2030,6 +2048,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+ return "PORT_DDI_A_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+ return "PORT_DDI_A_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+ return "PORT_DDI_B_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+ return "PORT_DDI_B_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+ return "PORT_DDI_C_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+ return "PORT_DDI_C_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+ return "PORT_DDI_D_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+ return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
case POWER_DOMAIN_VGA:
return "VGA";
case POWER_DOMAIN_AUDIO:
@@ -2180,6 +2220,7 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct drm_display_mode *mode;
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, drm_get_connector_name(connector),
@@ -2202,6 +2243,9 @@ static void intel_connector_info(struct seq_file *m,
else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
}
static int i915_display_info(struct seq_file *m, void *unused)
@@ -3167,9 +3211,8 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
- DRM_INFO("Manually setting wedged to %llu\n", val);
- i915_handle_error(dev, val);
-
+ i915_handle_error(dev, val,
+ "Manually setting wedged to %llu", val);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7688abc83fc0..e4d2b9f15ae2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1321,12 +1321,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
+ intel_power_domains_init_hw(dev_priv);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem_stolen;
- intel_power_domains_init_hw(dev);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1343,7 +1343,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
- intel_display_power_put(dev, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
return 0;
}
@@ -1381,7 +1381,7 @@ cleanup_gem:
WARN_ON(dev_priv->mm.aliasing_ppgtt);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
- intel_display_power_put(dev, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@@ -1480,12 +1480,16 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
+ enum pipe pipe;
info = (struct intel_device_info *)&dev_priv->info;
- info->num_sprites = 1;
if (IS_VALLEYVIEW(dev))
- info->num_sprites = 2;
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 2;
+ else
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 1;
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
@@ -1702,7 +1706,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- intel_power_domains_init(dev);
+ intel_power_domains_init(dev_priv);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
@@ -1731,7 +1735,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_power_well:
- intel_power_domains_remove(dev);
+ intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1781,8 +1785,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_remove(dev);
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_remove(dev_priv);
i915_teardown_sysfs(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2d05d7ce4c29..658fe24961eb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -265,6 +265,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
};
@@ -274,6 +275,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
};
@@ -324,7 +326,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pch;
+ struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
@@ -345,12 +347,9 @@ void intel_detect_pch(struct drm_device *dev)
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- while (pch) {
- struct pci_dev *curr = pch;
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -382,18 +381,16 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
- } else {
- goto check_next;
- }
- pci_dev_put(pch);
+ } else
+ continue;
+
break;
}
-check_next:
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
- pci_dev_put(curr);
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found?\n");
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
@@ -401,15 +398,13 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return false;
- /* Until we get further testing... */
- if (IS_GEN8(dev)) {
- WARN_ON(!i915.preliminary_hw_support);
- return false;
- }
-
if (i915.semaphores >= 0)
return i915.semaphores;
+ /* Until we get further testing... */
+ if (IS_GEN8(dev))
+ return false;
+
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -434,7 +429,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
- intel_display_set_init_power(dev, true);
+ intel_display_set_init_power(dev_priv, true);
drm_kms_helper_poll_disable(dev);
@@ -477,6 +472,8 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
+ dev_priv->suspend_count++;
+
return 0;
}
@@ -556,7 +553,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_unlock(&dev->struct_mutex);
}
- intel_power_domains_init_hw(dev);
+ intel_power_domains_init_hw(dev_priv);
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -847,6 +844,7 @@ static int i915_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!HAS_RUNTIME_PM(dev));
+ assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 05cfcc163a72..2a319ba91a71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,7 +79,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_A = 0,
@@ -114,6 +114,17 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_PORT_DDI_A_2_LANES,
+ POWER_DOMAIN_PORT_DDI_A_4_LANES,
+ POWER_DOMAIN_PORT_DDI_B_2_LANES,
+ POWER_DOMAIN_PORT_DDI_B_4_LANES,
+ POWER_DOMAIN_PORT_DDI_C_2_LANES,
+ POWER_DOMAIN_PORT_DDI_C_4_LANES,
+ POWER_DOMAIN_PORT_DDI_D_2_LANES,
+ POWER_DOMAIN_PORT_DDI_D_4_LANES,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
@@ -121,8 +132,6 @@ enum intel_display_power_domain {
POWER_DOMAIN_NUM,
};
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -130,14 +139,6 @@ enum intel_display_power_domain {
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP))
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -159,6 +160,7 @@ enum hpd_pin {
I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -303,6 +305,10 @@ struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ char error_msg[128];
+ u32 reset_count;
+ u32 suspend_count;
+
/* Generic register state */
u32 eir;
u32 pgtbl_er;
@@ -360,7 +366,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer, *ctx, *hws_page;
+ } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
struct drm_i915_error_request {
long jiffies;
@@ -375,6 +381,9 @@ struct drm_i915_error_state {
u32 pp_dir_base;
};
} vm_info;
+
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
@@ -499,7 +508,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
- struct delayed_work force_wake_work;
+ struct timer_list force_wake_timer;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -534,7 +543,7 @@ struct intel_uncore {
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
- u8 num_sprites:2;
+ u8 num_sprites[I915_MAX_PIPES];
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
@@ -652,12 +661,12 @@ struct i915_address_space {
enum i915_cache_level level,
bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_address_space *vm);
};
@@ -691,21 +700,21 @@ struct i915_gtt {
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define GEN8_LEGACY_PDPS 4
struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
unsigned num_pd_entries;
+ unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
- struct page *gen8_pt_pages;
+ struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
};
struct page *pd_pages;
- int num_pd_pages;
- int num_pt_pages;
union {
uint32_t pd_offset;
- dma_addr_t pd_dma_addr[4];
+ dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
@@ -1016,6 +1025,36 @@ struct intel_ilk_power_mgmt {
struct drm_i915_gem_object *renderctx;
};
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
+
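+/*
+ * Sketch of how a platform might wire up these ops (all names below are
+ * hypothetical, not taken from the driver):
+ *
+ *	static const struct i915_power_well_ops example_power_well_ops = {
+ *		.sync_hw	= example_power_well_sync_hw,
+ *		.enable		= example_power_well_enable,
+ *		.disable	= example_power_well_disable,
+ *		.is_enabled	= example_power_well_enabled,
+ *	};
+ */
+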
/* Power well structure for haswell */
struct i915_power_well {
const char *name;
@@ -1023,11 +1062,8 @@ struct i915_power_well {
/* power well enable/disable usage count */
int count;
unsigned long domains;
- void *data;
- void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
- bool enable);
- bool (*is_enabled)(struct drm_device *dev,
- struct i915_power_well *power_well);
+ unsigned long data;
+ const struct i915_power_well_ops *ops;
};
struct i915_power_domains {
@@ -1124,6 +1160,14 @@ struct i915_gem_mm {
*/
bool interruptible;
+ /**
+ * Is the GPU currently considered idle, or busy executing userspace
+ * requests? Whilst idle, we attempt to power down the hardware and
+ * display clocks. In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ bool busy;
+
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1313,11 +1357,10 @@ struct ilk_wm_values {
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
+ * everything, so we have the requirements_met variable. When we switch
+ * requirements_met to true we decrease disable_count, and increase it in the
+ * opposite case. The requirements_met variable is true when all the CRTCs,
+ * encoders and the power well are disabled.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
@@ -1340,7 +1383,6 @@ struct ilk_wm_values {
*/
struct i915_package_c8 {
bool requirements_met;
- bool gpu_idle;
bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
@@ -1427,6 +1469,8 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ bool display_irqs_enabled;
+
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
@@ -1594,6 +1638,8 @@ typedef struct drm_i915_private {
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
+
+ u32 suspend_count;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -1745,7 +1791,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
};
-#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1791,6 +1836,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
+ struct drm_file *file;
struct {
spinlock_t lock;
@@ -1803,6 +1849,90 @@ struct drm_i915_file_private {
atomic_t rps_wait_boost;
};
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+ * CMD_DESC_MASTER: The command is allowed if the submitting process
+ * is the DRM master
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+	 * The command's length: either a fixed length (i.e. the command does
+	 * not include a length field) or a mask for its length field. The
+	 * CMD_DESC_FIXED flag selects between the two. All command entries in
+	 * a command table must include length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
+
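+/*
+ * For illustration only (hypothetical names): a ring's cmd_tables would
+ * typically point at an array such as
+ *
+ *	static const struct drm_i915_cmd_table example_ring_cmd_tables[] = {
+ *		{ example_common_cmds, ARRAY_SIZE(example_common_cmds) },
+ *		{ example_ring_cmds, ARRAY_SIZE(example_ring_cmds) },
+ *	};
+ *
+ * with each inner table sorted by command opcode, as validate_cmds_sorted()
+ * checks at init time.
+ */
+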
#define INTEL_INFO(dev) (&to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
@@ -1965,6 +2095,7 @@ struct i915_params {
int enable_pc8;
int pc8_timeout;
int invert_brightness;
+ int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
@@ -2004,7 +2135,9 @@ extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
-void i915_handle_error(struct drm_device *dev, bool wedged);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...);
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
int new_delay);
@@ -2025,6 +2158,9 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
u32 status_mask);
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -2097,6 +2233,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush);
+
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
@@ -2163,8 +2302,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring);
+
bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2365,63 +2506,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
intel_gtt_chipset_flush();
}
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
-static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
-{
- if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
- return false;
-
- if (i915.enable_ppgtt == 1 && full)
- return false;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
- DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return false;
- }
-#endif
-
- if (full)
- return HAS_PPGTT(dev);
- else
- return HAS_ALIASING_PPGTT(dev);
-}
-
-static inline void ppgtt_release(struct kref *kref)
-{
- struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &ppgtt->base;
-
- if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
- (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
- ppgtt->base.cleanup(&ppgtt->base);
- return;
- }
-
- /*
- * Make sure vmas are unbound before we take down the drm_mm
- *
- * FIXME: Proper refcounting should take care of this, this shouldn't be
- * needed at all.
- */
- if (!list_empty(&vm->active_list)) {
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &vm->active_list, mm_list)
- if (WARN_ON(list_empty(&vma->vma_link) ||
- list_is_singular(&vma->vma_link)))
- break;
-
- i915_gem_evict_vm(&ppgtt->base, true);
- } else {
- i915_gem_retire_requests(dev);
- i915_gem_evict_vm(&ppgtt->base, false);
- }
-
- ppgtt->base.cleanup(&ppgtt->base);
-}
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -2478,7 +2563,8 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev);
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+ const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@ -2487,6 +2573,14 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
+/* i915_cmd_parser.c */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
+int i915_parse_cmds(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master);
+
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
@@ -2565,6 +2659,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -2599,6 +2694,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 928e15bb2cf7..9c52f68df66c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -61,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -326,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
return 0;
}
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+
+ if (!obj->base.filp)
+ return -EINVAL;
+
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+ /* If we're not in the cpu read domain, set ourself into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+ obj->cache_level);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ return ret;
+}
+
/* Per-page copy function for the shmem pread fastpath.
* Flushes invalid cachelines before reading the target if
* needs_clflush is set. */
@@ -423,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
- /* If we're not in the cpu read domain, set ourself into the gtt
- * read domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will dirty the data
- * anyway again before the next pread happens. */
- needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
offset = args->offset;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
@@ -2149,7 +2173,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position, request_start;
- int was_empty;
int ret;
request_start = intel_ring_get_tail(ring);
@@ -2200,7 +2223,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
i915_gem_context_reference(request->ctx);
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
@@ -2221,13 +2243,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
- if (was_empty) {
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
- }
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
}
if (out_seqno)
@@ -2260,14 +2280,13 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
return true;
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
- if (dev_priv->gpu_error.stop_rings == 0 &&
- i915_gem_context_is_default(ctx)) {
- DRM_ERROR("gpu hanging too fast, banning!\n");
- } else {
+ if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
+ return true;
+ } else if (dev_priv->gpu_error.stop_rings == 0) {
+ DRM_ERROR("gpu hanging too fast, banning!\n");
+ return true;
}
-
- return true;
}
return false;
@@ -2304,11 +2323,13 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static struct drm_i915_gem_request *
-i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
- const u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 completed_seqno;
+
+ completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
@@ -2326,7 +2347,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request *request;
bool ring_hung;
- request = i915_gem_find_first_non_complete(ring);
+ request = i915_gem_find_active_request(ring);
if (request == NULL)
return;
@@ -2418,7 +2439,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-void
+static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@@ -2745,7 +2766,7 @@ int i915_vma_unbind(struct i915_vma *vma)
i915_gem_gtt_finish_object(obj);
- list_del(&vma->mm_list);
+ list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
@@ -4861,6 +4882,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
+ file_priv->file = file;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f8c21a6dd663..ce41cff84346 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -99,6 +99,50 @@
static int do_switch(struct intel_ring_buffer *ring,
struct i915_hw_context *to);
+static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+
+ if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+ (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+ ppgtt->base.cleanup(&ppgtt->base);
+ return;
+ }
+
+ /*
+ * Make sure vmas are unbound before we take down the drm_mm
+ *
+ * FIXME: Proper refcounting should take care of this, this shouldn't be
+ * needed at all.
+ */
+ if (!list_empty(&vm->active_list)) {
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ if (WARN_ON(list_empty(&vma->vma_link) ||
+ list_is_singular(&vma->vma_link)))
+ break;
+
+ i915_gem_evict_vm(&ppgtt->base, true);
+ } else {
+ i915_gem_retire_requests(dev);
+ i915_gem_evict_vm(&ppgtt->base, false);
+ }
+
+ ppgtt->base.cleanup(&ppgtt->base);
+}
+
+static void ppgtt_release(struct kref *kref)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(kref, struct i915_hw_ppgtt, ref);
+
+ do_ppgtt_cleanup(ppgtt);
+ kfree(ppgtt);
+}
+
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
@@ -714,7 +758,7 @@ unpin_out:
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
- * @id: context id number
+ * @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
@@ -746,9 +790,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
@@ -775,9 +816,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
if (args->ctx_id == DEFAULT_CONTEXT_ID)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d7229ad2bd22..3851a1b1dc88 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1182,6 +1182,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ if (i915_needs_cmd_parser(ring)) {
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ args->batch_start_offset,
+ file->is_master);
+ if (ret)
+ goto err;
+
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
+
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9673cffb07f3..63a6dc7a6bb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,5 +1,6 @@
/*
* Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,6 +30,29 @@
#include "i915_trace.h"
#include "intel_drv.h"
+bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+ if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ return false;
+
+ if (i915.enable_ppgtt == 1 && full)
+ return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ DRM_INFO("Disabling PPGTT because VT-d is on\n");
+ return false;
+ }
+#endif
+
+ /* Full ppgtt disabled by default for now due to issues. */
+ if (full)
+ return false; /* HAS_PPGTT(dev) */
+ else
+ return HAS_ALIASING_PPGTT(dev);
+}
+
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
@@ -64,7 +88,19 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-#define GEN8_LEGACY_PDPS 4
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference compared to a normal x86 3 level page table is that the
+ * PDPEs are programmed via register.
+ */
+#define GEN8_PDPE_SHIFT 30
+#define GEN8_PDPE_MASK 0x3
+#define GEN8_PDE_SHIFT 21
+#define GEN8_PDE_MASK 0x1ff
+#define GEN8_PTE_SHIFT 12
+#define GEN8_PTE_MASK 0x1ff
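+
+/* Worked example (sketch): decoding GPU virtual address 0x40322000 with the
+ * shifts/masks above gives
+ *	pdpe = (0x40322000 >> 30) & 0x3   = 1
+ *	pde  = (0x40322000 >> 21) & 0x1ff = 1
+ *	pte  = (0x40322000 >> 12) & 0x1ff = 0x122
+ * which is exactly the decode gen8_ppgtt_clear_range() and
+ * gen8_ppgtt_insert_entries() perform below.
+ */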
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -254,84 +290,113 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- unsigned first_entry,
- unsigned num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
- unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
- unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+ struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
- last_pte = first_pte + num_entries;
+ last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
pt_vaddr = kmap_atomic(page_table);
- for (i = first_pte; i < last_pte; i++)
+ for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
+ num_entries--;
+ }
kunmap_atomic(pt_vaddr);
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pt++;
+ pte = 0;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
}
}
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- unsigned first_entry,
+ uint64_t start,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
- unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
- unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
+
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ break;
+
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
- pt_vaddr[act_pte] =
+ pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++act_pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- act_pt++;
- act_pte = 0;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
+ pte = 0;
}
}
if (pt_vaddr)
kunmap_atomic(pt_vaddr);
}
-static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+static void gen8_free_page_tables(struct page **pt_pages)
{
int i;
- for (i = 0; i < ppgtt->num_pd_pages ; i++)
+ if (pt_pages == NULL)
+ return;
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
+ if (pt_pages[i])
+ __free_pages(pt_pages[i], 0);
+}
+
+static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
+ kfree(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_dma_addr[i]);
+ }
- __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
+ struct pci_dev *hwdev = ppgtt->base.dev->pdev;
int i, j;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
@@ -340,18 +405,14 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
if (!ppgtt->pd_dma_addr[i])
continue;
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pd_dma_addr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
if (addr)
- pci_unmap_page(ppgtt->base.dev->pdev,
- addr,
- PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
-
+ pci_unmap_page(hwdev, addr, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
}
}
}
@@ -368,88 +429,198 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-/**
- * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
- * net effect resembling a 2-level page table in normal x86 terms. Each PDP
- * represents 1GB of memory
- * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
- *
- * TODO: Do something with the size parameter
- **/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static struct page **__gen8_alloc_page_tables(void)
{
- struct page *pt_pages;
- int i, j, ret = -ENOMEM;
- const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ struct page **pt_pages;
+ int i;
- if (size % (1<<30))
- DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+ pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
+ if (!pt_pages)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+ pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!pt_pages[i])
+ goto bail;
+ }
- /* FIXME: split allocation into smaller pieces. For now we only ever do
- * this once, but with full PPGTT, the multiple contiguous allocations
- * will be bad.
+ return pt_pages;
+
+bail:
+ gen8_free_page_tables(pt_pages);
+ kfree(pt_pages);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ struct page **pt_pages[GEN8_LEGACY_PDPS];
+ int i, ret;
+
+ for (i = 0; i < max_pdp; i++) {
+ pt_pages[i] = __gen8_alloc_page_tables();
+ if (IS_ERR(pt_pages[i])) {
+ ret = PTR_ERR(pt_pages[i]);
+ goto unwind_out;
+ }
+ }
+
+	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation
+	 * "atomic" for cleanup purposes.
*/
+ for (i = 0; i < max_pdp; i++)
+ ppgtt->gen8_pt_pages[i] = pt_pages[i];
+
+ return 0;
+
+unwind_out:
+ while (i--) {
+ gen8_free_page_tables(pt_pages[i]);
+ kfree(pt_pages[i]);
+ }
+
+ return ret;
+}
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+ sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ppgtt->gen8_pt_dma_addr[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
return -ENOMEM;
- pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
- if (!pt_pages) {
+ ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+ return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ int ret;
+
+ ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+ if (ret) {
__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return -ENOMEM;
+ return ret;
}
- ppgtt->gen8_pt_pages = pt_pages;
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
- ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
- ppgtt->enable = gen8_ppgtt_enable;
- ppgtt->switch_mm = gen8_mm_switch;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+ ret = gen8_ppgtt_allocate_dma(ppgtt);
+ if (ret)
+ gen8_ppgtt_free(ppgtt);
- /*
- * - Create a mapping for the page directories.
- * - For each page directory:
- * allocate space for page table mappings.
- * map each page table
- */
- for (i = 0; i < max_pdp; i++) {
- dma_addr_t temp;
- temp = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
- goto err_out;
+ return ret;
+}
- ppgtt->pd_dma_addr[i] = temp;
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int pd)
+{
+ dma_addr_t pd_addr;
+ int ret;
- ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- goto err_out;
+ pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+ &ppgtt->pd_pages[pd], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
- temp = pci_map_page(ppgtt->base.dev->pdev,
- p, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+ if (ret)
+ return ret;
+
+ ppgtt->pd_dma_addr[pd] = pd_addr;
+
+ return 0;
+}
- if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
- goto err_out;
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int pd,
+ const int pt)
+{
+ dma_addr_t pt_addr;
+ struct page *p;
+ int ret;
- ppgtt->gen8_pt_dma_addr[i][j] = temp;
+ p = ppgtt->gen8_pt_pages[pd][pt];
+ pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+ p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+ if (ret)
+ return ret;
+
+ ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+ return 0;
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory:
+ * 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
+ *
+ * FIXME: split allocation into smaller pieces. For now we only ever do this
+ * once, but with full PPGTT, the multiple contiguous allocations will be bad.
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+ const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+ const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ int i, j, ret;
+
+ if (size % (1<<30))
+ DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+ /* 1. Do all our allocations for page directories and page tables. */
+ ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ /*
+ * 2. Create DMA mappings for the page directories and page tables.
+ */
+ for (i = 0; i < max_pdp; i++) {
+ ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
+ if (ret)
+ goto bail;
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+ if (ret)
+ goto bail;
}
}
- /* For now, the PPGTT helper functions all require that the PDEs are
+ /*
+ * 3. Map all the page directory entries to point to the page tables
+ * we've allocated.
+ *
+ * For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
- * will never need to touch the PDEs again */
+ * will never need to touch the PDEs again.
+ */
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
@@ -461,20 +632,26 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
kunmap_atomic(pd_vaddr);
}
- ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
- true);
+ ppgtt->enable = gen8_ppgtt_enable;
+ ppgtt->switch_mm = gen8_mm_switch;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
- ppgtt->num_pt_pages,
- (ppgtt->num_pt_pages - num_pt_pages) +
- size % (1<<30));
+ ppgtt->num_pd_entries,
+ (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
-err_out:
- ppgtt->base.cleanup(&ppgtt->base);
+bail:
+ gen8_ppgtt_unmap_pages(ppgtt);
+ gen8_ppgtt_free(ppgtt);
return ret;
}
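
To sanity-check the sizing logic with concrete numbers (illustrative arithmetic only, assuming 512 PDEs and 512 eight-byte PTEs per 4KB page):

	uint64_t size = 4ULL << 30;                      /* 4GB request          */
	int max_pdp = DIV_ROUND_UP(size, 1 << 30);       /* = 4 page directories */
	int num_pd_entries = max_pdp * 512;              /* = 2048 page tables   */
	uint64_t total = num_pd_entries * 512ULL * 4096; /* = 4GB                */

which matches the ppgtt->base.total the init path now computes from num_pd_entries.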
@@ -776,13 +953,15 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- unsigned first_entry,
- unsigned num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
@@ -809,12 +988,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- unsigned first_entry,
+ uint64_t start,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
+ unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
@@ -838,38 +1018,49 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
int i;
- list_del(&vm->global_link);
- drm_mm_takedown(&ppgtt->base.mm);
- drm_mm_remove_node(&ppgtt->node);
-
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
+}
+
+static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
- kfree(ppgtt);
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+
+ list_del(&vm->global_link);
+ drm_mm_takedown(&ppgtt->base.mm);
+ drm_mm_remove_node(&ppgtt->node);
+
+ gen6_ppgtt_unmap_pages(ppgtt);
+ gen6_ppgtt_free(ppgtt);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool retried = false;
- int i, ret;
+ int ret;
/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
@@ -896,42 +1087,60 @@ alloc:
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- if (IS_GEN6(dev)) {
- ppgtt->enable = gen6_ppgtt_enable;
- ppgtt->switch_mm = gen6_mm_switch;
- } else if (IS_HASWELL(dev)) {
- ppgtt->enable = gen7_ppgtt_enable;
- ppgtt->switch_mm = hsw_mm_switch;
- } else if (IS_GEN7(dev)) {
- ppgtt->enable = gen7_ppgtt_enable;
- ppgtt->switch_mm = gen7_mm_switch;
- } else
- BUG();
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.scratch = dev_priv->gtt.base.scratch;
- ppgtt->base.start = 0;
- ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ return ret;
+}
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
- if (!ppgtt->pt_pages) {
- drm_mm_remove_node(&ppgtt->node);
+
+ if (!ppgtt->pt_pages)
return -ENOMEM;
- }
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i])
- goto err_pt_alloc;
+ if (!ppgtt->pt_pages[i]) {
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+ int ret;
+
+ ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+ if (ret) {
+ drm_mm_remove_node(&ppgtt->node);
+ return ret;
}
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
+ if (!ppgtt->pt_dma_addr) {
+ drm_mm_remove_node(&ppgtt->node);
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
@@ -940,41 +1149,63 @@ alloc:
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
-
+ gen6_ppgtt_unmap_pages(ppgtt);
+ return -EIO;
}
+
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+ return 0;
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ if (IS_GEN6(dev)) {
+ ppgtt->enable = gen6_ppgtt_enable;
+ ppgtt->switch_mm = gen6_mm_switch;
+ } else if (IS_HASWELL(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = hsw_mm_switch;
+ } else if (IS_GEN7(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = gen7_mm_switch;
+ } else
+ BUG();
+
+ ret = gen6_ppgtt_alloc(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_setup_page_tables(ppgtt);
+ if (ret) {
+ gen6_ppgtt_free(ppgtt);
+ return ret;
+ }
+
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
ppgtt->pd_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
- return 0;
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
-err_pd_pin:
- if (ppgtt->pt_dma_addr) {
- for (i--; i >= 0; i--)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
-err_pt_alloc:
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- if (ppgtt->pt_pages[i])
- __free_page(ppgtt->pt_pages[i]);
- }
- kfree(ppgtt->pt_pages);
- drm_mm_remove_node(&ppgtt->node);
+ DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+ ppgtt->node.size >> 20,
+ ppgtt->node.start / PAGE_SIZE);
- return ret;
+ return 0;
}
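
For comparison with the gen8 path, the gen6 numbers work out as follows, assuming GEN6_PPGTT_PD_ENTRIES = 512 and I915_PPGTT_PT_ENTRIES = 1024 four-byte PTEs per page table (assumed values, not quoted from this patch):

	/* base.total: 512 * 1024 * 4096 = 2GB of PPGTT address space.        */
	/* pd_offset: a PD placed at GGTT offset 0x800000 gives               */
	/*   (0x800000 / 4096) * sizeof(gen6_gtt_pte_t) = 2048 * 4 = 0x2000.  */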
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
@@ -1012,20 +1243,17 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- const unsigned long entry = vma->node.start >> PAGE_SHIFT;
-
WARN_ON(flags);
- vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
+ vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ cache_level);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- const unsigned long entry = vma->node.start >> PAGE_SHIFT;
-
vma->vm->clear_range(vma->vm,
- entry,
- vma->obj->base.size >> PAGE_SHIFT,
+ vma->node.start,
+ vma->obj->base.size,
true);
}
@@ -1109,8 +1337,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
false);
}
@@ -1124,8 +1352,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start / PAGE_SIZE,
- dev_priv->gtt.base.total / PAGE_SIZE,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -1186,10 +1414,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -1231,10 +1460,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- unsigned int first_entry,
+ uint64_t start,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -1266,11 +1496,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1290,11 +1522,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1327,10 +1561,12 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
- unsigned int first_entry,
- unsigned int num_entries,
+ uint64_t start,
+ uint64_t length,
bool unused)
{
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
intel_gtt_clear_range(first_entry, num_entries);
}
@@ -1351,7 +1587,6 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
- const unsigned long entry = vma->node.start >> PAGE_SHIFT;
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
@@ -1367,7 +1602,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!obj->has_global_gtt_mapping ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, obj->pages, entry,
+ vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->node.start,
cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -1378,7 +1614,9 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
- vma->obj->pages, entry, cache_level);
+ vma->obj->pages,
+ vma->node.start,
+ cache_level);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
@@ -1388,11 +1626,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
- const unsigned long entry = vma->node.start >> PAGE_SHIFT;
if (obj->has_global_gtt_mapping) {
- vma->vm->clear_range(vma->vm, entry,
- vma->obj->base.size >> PAGE_SHIFT,
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ obj->base.size,
true);
obj->has_global_gtt_mapping = 0;
}
@@ -1400,8 +1638,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
if (obj->has_aliasing_ppgtt_mapping) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
- entry,
- obj->base.size >> PAGE_SHIFT,
+ vma->node.start,
+ obj->base.size,
true);
obj->has_aliasing_ppgtt_mapping = 0;
}
@@ -1486,14 +1724,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
- const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
+ ggtt_vm->clear_range(ggtt_vm, hole_start,
+ hole_end - hole_start, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
+ ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}
void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1558,11 +1796,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
- if (bdw_gmch_ctl > 4) {
- WARN_ON(!i915.preliminary_hw_support);
- return 4<<20;
- }
-
return bdw_gmch_ctl << 20;
}
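
With the preliminary-hw clamp dropped, the GGMS decode is a straight power of two. A quick table of what the function now returns (illustrative, per the code above): field 0 -> 0; field 1 -> (1 << 1) << 20 = 2MB; field 2 -> 4MB; field 3 -> 8MB.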
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84f2315..d58b4e287e32 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+	 * One more attempt, but this time requesting the region from
+	 * base + 1, as we have seen that this resolves the region
+	 * conflict with the PCI bus.
+	 * This is a BIOS w/a: some BIOSes wrap stolen memory in the root
+	 * PCI bus, but have an off-by-one error. Hence retry the
+	 * reservation starting from base + 1 instead of base.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 000b3694f349..144a5e2bc7ef 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -304,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_object *obj)
+{
+ int page, offset, elt;
+
+ for (page = offset = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+}
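
Each object now dumps through one helper as offset/value pairs, one dword per line, e.g. (values invented for illustration): "00000000 : 7a000002", "00000004 : 00000000", and so on for PAGE_SIZE/4 entries per page.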
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
- int i, j, page, offset, elt;
+ int i, j, offset, elt;
+ int max_hangcheck_score;
if (!error) {
err_printf(m, "no error state collected\n");
goto out;
}
+ err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ max_hangcheck_score = 0;
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score > max_hangcheck_score)
+ max_hangcheck_score = error->ring[i].hangcheck_score;
+ }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+ error->ring[i].pid != -1) {
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
+ ring_str(i),
+ error->ring[i].comm,
+ error->ring[i].pid);
+ }
+ }
+ err_printf(m, "Reset count: %u\n", error->reset_count);
+ err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
@@ -362,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
- if ((obj = error->ring[i].batchbuffer)) {
- err_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
+ obj = error->ring[i].batchbuffer;
+ if (obj) {
+ err_puts(m, dev_priv->ring[i].name);
+ if (error->ring[i].pid != -1)
+ err_printf(m, " (submitted by %s [%d])",
+ error->ring[i].comm,
+ error->ring[i].pid);
+ err_printf(m, " --- gtt_offset = 0x%08x\n",
obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
+ print_error_obj(m, obj);
+ }
+
+ obj = error->ring[i].wa_batchbuffer;
+ if (obj) {
+ err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name, obj->gtt_offset);
+ print_error_obj(m, obj);
}
if (error->ring[i].num_requests) {
@@ -392,15 +429,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n",
- offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
+ print_error_obj(m, obj);
}
if ((obj = error->ring[i].hws_page)) {
@@ -666,7 +695,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct drm_i915_error_state *error,
+ int *ring_id)
{
uint32_t error_code = 0;
int i;
@@ -676,9 +706,14 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* synchronization commands which almost always appear in the case of
* strictly a client bug. Use instdone to help differentiate those.
*/
- for (i = 0; i < I915_NUM_RINGS; i++)
- if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (ring_id)
+ *ring_id = i;
+
return error->ring[i].ipehr ^ error->ring[i].instdone;
+ }
+ }
return error_code;
}
@@ -716,87 +751,6 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
-/* This assumes all batchbuffers are executed from the PPGTT. It might have to
- * change in the future. */
-static bool is_active_vm(struct i915_address_space *vm,
- struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
-
- if (INTEL_INFO(dev)->gen < 7)
- return i915_is_ggtt(vm);
-
- /* FIXME: This ignores that the global gtt vm is also on this list. */
- ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
-
- if (INTEL_INFO(dev)->gen >= 8) {
- u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
- pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
- return pdp0 == ppgtt->pd_dma_addr[0];
- } else {
- u32 pp_db;
- pp_db = I915_READ(RING_PP_DIR_BASE(ring));
- return (pp_db >> 10) == ppgtt->pd_offset;
- }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
-{
- struct i915_address_space *vm;
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
- bool found_active = false;
- u32 seqno;
-
- if (!ring->get_seqno)
- return NULL;
-
- if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
- u32 acthd = I915_READ(ACTHD);
-
- if (WARN_ON(ring->id != RCS))
- return NULL;
-
- obj = ring->scratch.obj;
- if (obj != NULL &&
- acthd >= i915_gem_obj_ggtt_offset(obj) &&
- acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
- return i915_error_ggtt_object_create(dev_priv, obj);
- }
-
- seqno = ring->get_seqno(ring, false);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- if (!is_active_vm(vm, ring))
- continue;
-
- found_active = true;
-
- list_for_each_entry(vma, &vm->active_list, mm_list) {
- obj = vma->obj;
- if (obj->ring != ring)
- continue;
-
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj, vm);
- }
- }
-
- WARN_ON(!found_active);
- return NULL;
-}
-
static void i915_record_ring_state(struct drm_device *dev,
struct intel_ring_buffer *ring,
struct drm_i915_error_ring *ering)
@@ -945,8 +899,39 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_record_ring_state(dev, ring, &error->ring[i]);
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
+ error->ring[i].pid = -1;
+ request = i915_gem_find_active_request(ring);
+ if (request) {
+ /* We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ error->ring[i].batchbuffer =
+ i915_error_object_create(dev_priv,
+ request->batch_obj,
+ request->ctx ?
+ request->ctx->vm :
+ &dev_priv->gtt.base);
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
+ ring->scratch.obj)
+ error->ring[i].wa_batchbuffer =
+ i915_error_ggtt_object_create(dev_priv,
+ ring->scratch.obj);
+
+ if (request->file_priv) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(request->file_priv->file->pid,
+ PIDTYPE_PID);
+ if (task) {
+ strcpy(error->ring[i].comm, task->comm);
+ error->ring[i].pid = task->pid;
+ }
+ rcu_read_unlock();
+ }
+ }
error->ring[i].ringbuffer =
i915_error_ggtt_object_create(dev_priv, ring->obj);
@@ -1113,6 +1098,40 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
i915_get_extra_instdone(dev, error->extra_instdone);
}
+static void i915_error_capture_msg(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ bool wedged,
+ const char *error_msg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ecode;
+ int ring_id = -1, len;
+
+ ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+ len = scnprintf(error->error_msg, sizeof(error->error_msg),
+ "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+
+ if (ring_id != -1 && error->ring[ring_id].pid != -1)
+ len += scnprintf(error->error_msg + len,
+ sizeof(error->error_msg) - len,
+ ", in %s [%d]",
+ error->ring[ring_id].comm,
+ error->ring[ring_id].pid);
+
+ scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+ ", reason: %s, action: %s",
+ error_msg,
+ wedged ? "reset" : "continue");
+}
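
Chained together, the three scnprintf() calls yield a single summary line such as (all values invented for illustration):

	GPU HANG: ecode 0:0x8ed9e9c2, in Xorg [1234], reason: Ring hung, action: reset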
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ error->suspend_count = dev_priv->suspend_count;
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1122,19 +1141,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev)
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+ const char *error_msg)
{
static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- uint32_t ecode;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
- return;
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -1143,30 +1156,22 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- dev->primary->index);
kref_init(&error->ref);
+ i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
- ecode = i915_error_generate_code(dev_priv, error);
-
- if (!warned) {
- DRM_INFO("GPU HANG [%x]\n", ecode);
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- warned = true;
- }
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
+ i915_error_capture_msg(dev, error, wedged, error_msg);
+ DRM_INFO("%s\n", error->error_msg);
+
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
@@ -1174,8 +1179,19 @@ void i915_capture_error_state(struct drm_device *dev)
}
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
+ if (error) {
i915_error_state_free(&error->ref);
+ return;
+ }
+
+ if (!warned) {
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ warned = true;
+ }
}
void i915_error_state_get(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f68aee31e565..be2713f12e08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -387,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
*
* Returns the previous state of underrun reporting.
*/
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ assert_spin_locked(&dev_priv->irq_lock);
ret = !intel_crtc->cpu_fifo_underrun_disabled;
@@ -415,7 +414,20 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
done:
+ return ret;
+}
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
return ret;
}
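
This is the usual locked/unlocked split: the double-underscore variant asserts the lock, the wrapper takes it. A hypothetical caller that already holds irq_lock would do (sketch, not from this patch):

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	__intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

while everyone else keeps calling intel_set_cpu_fifo_underrun_reporting().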
@@ -482,7 +494,7 @@ done:
}
-void
+static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
@@ -506,7 +518,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
POSTING_READ(reg);
}
-void
+static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
@@ -1296,8 +1308,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
- DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false, "GT error interrupt 0x%08x",
+ gt_iir);
}
if (gt_iir & GT_PARITY_ERROR(dev))
@@ -1544,8 +1556,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
- i915_handle_error(dev_priv->dev, false);
+ i915_handle_error(dev_priv->dev, false,
+ "VEBOX CS error interrupt 0x%08x",
+ pm_iir);
}
}
}
@@ -1865,7 +1878,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe i;
+ enum pipe pipe;
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
@@ -1876,14 +1889,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
- for_each_pipe(i) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
- drm_handle_vblank(dev, i);
+ for_each_pipe(pipe) {
+ if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ drm_handle_vblank(dev, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
- intel_prepare_page_flip(dev, i);
- intel_finish_page_flip_plane(dev, i);
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
}
}
@@ -2277,11 +2290,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ va_list args;
+ char error_msg[80];
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
- i915_capture_error_state(dev);
+ i915_capture_error_state(dev, wedged, error_msg);
i915_report_and_clear_eir(dev);
if (wedged) {
@@ -2584,9 +2604,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
*/
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
- DRM_ERROR("Kicking stuck wait on %s\n",
- ring->name);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Kicking stuck wait on %s",
+ ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
@@ -2596,9 +2616,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
default:
return HANGCHECK_HUNG;
case 1:
- DRM_ERROR("Kicking stuck semaphore on %s\n",
- ring->name);
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Kicking stuck semaphore on %s",
+ ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
@@ -2720,7 +2740,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
if (rings_hung)
- return i915_handle_error(dev, true);
+ return i915_handle_error(dev, true, "Ring hung");
if (busy_count)
/* Reset timer case chip hangs without another request
@@ -3016,44 +3036,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 iir_mask;
+
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
+
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ dev_priv->irq_mask &= ~iir_mask;
+
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ POSTING_READ(VLV_IER);
+}
+
+static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 iir_mask;
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ dev_priv->irq_mask |= iir_mask;
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ POSTING_READ(VLV_IIR);
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+}
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = true;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_install(dev_priv);
+}
+
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (!dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = false;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+}
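
Both toggles must run under irq_lock, and they defer the actual register writes until the IRQ handler is installed. A minimal caller sketch (hypothetical, not from this patch):

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	valleyview_enable_display_irqs(dev_priv); /* registers untouched if !irq_enabled */
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);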
+
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
unsigned long irqflags;
- enable_mask = I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- /*
- *Leave vblank interrupts masked initially. enable/disable will
- * toggle them based on usage.
- */
- dev_priv->irq_mask = (~enable_mask) |
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(PIPESTAT(0), 0xffff);
- I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3184,6 +3273,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
int pipe;
if (!dev_priv)
@@ -3197,8 +3287,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ dev_priv->irq_mask = 0;
+
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
@@ -3337,7 +3433,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3519,7 +3617,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
@@ -3756,7 +3856,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 3b482585c5ae..a66ffb652bee 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
+ .enable_cmd_parser = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -157,3 +158,7 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+
+module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+MODULE_PARM_DESC(enable_cmd_parser,
+ "Enable command parsing (1=enabled, 0=disabled [default])");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f564ce37d2c..146609ab42bb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -175,6 +175,18 @@
#define VGA_CR_DATA_CGA 0x3d5
/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_CLIENT_MASK 0xE0000000
+#define INSTR_MI_CLIENT 0x0
+#define INSTR_BC_CLIENT 0x2
+#define INSTR_RC_CLIENT 0x3
+#define INSTR_SUBCLIENT_SHIFT 27
+#define INSTR_SUBCLIENT_MASK 0x18000000
+#define INSTR_MEDIA_SUBCLIENT 0x2
+
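A hypothetical decode using these fields (cmd is an assumed pointer into a command buffer, not something this patch defines):

	u32 header = cmd[0];
	u32 client = (header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient = (header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
	bool media = (client == INSTR_RC_CLIENT &&
		      subclient == INSTR_MEDIA_SUBCLIENT);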
+/*
* Memory interface instructions used by the kernel
*/
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
@@ -377,14 +389,30 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+
+/* See the PUNIT HAS v0.8 for the below bits */
+enum punit_power_well {
+ PUNIT_POWER_WELL_RENDER = 0,
+ PUNIT_POWER_WELL_MEDIA = 1,
+ PUNIT_POWER_WELL_DISP2D = 3,
+ PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
+ PUNIT_POWER_WELL_DPIO_RX0 = 10,
+ PUNIT_POWER_WELL_DPIO_RX1 = 11,
+
+ PUNIT_POWER_WELL_NUM,
+};
+
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
-#define PUNIT_CLK_GATE 1
-#define PUNIT_PWR_RESET 2
-#define PUNIT_PWR_GATE 3
-#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
-#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
-#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
+#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
+#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
+#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
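
Each well now owns a 2-bit field in the PWRGT registers; e.g. (illustrative arithmetic) PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D) = 3 << (3 * 2) = 0xc0, which matches the old hardcoded DISP2D_PWRGT = PUNIT_PWR_GATE << 6.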
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
@@ -798,7 +826,12 @@
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN7_GT_MODE 0x7008
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
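
Spelled out (illustrative): GEN6_WIZ_HASHING_16x4 = (1 << 9) | (0 << 7) = 0x200, and GEN6_WIZ_HASHING_MASK = ((1 << 9) | (1 << 7)) << 16 = 0x02800000, i.e. the enable bits shifted into the upper half, presumably for masked register writes.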
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
@@ -944,6 +977,9 @@
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1121,13 +1157,6 @@
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
-#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
-#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
-#define HSW_BYPASS_FBC_QUEUE (1<<22)
-#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
- _HSW_PIPE_SLICE_CHICKEN_1_A, + \
- _HSW_PIPE_SLICE_CHICKEN_1_B)
-
/*
* GPIO regs
*/
@@ -4140,7 +4169,8 @@
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
-#define DPRS_MASK_VBLANK_SRD (1 << 0)
+#define HSW_FBCQ_DIS (1 << 22)
+#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL 0x45000
@@ -4164,7 +4194,7 @@
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN7_L3CNTLREG1 0xB01C
-#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
@@ -4898,6 +4928,9 @@
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_UCGCTL6 0x9430
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -5043,6 +5076,10 @@
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN8_ROW_CHICKEN 0xe4f0
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
+#define STALL_DOP_GATING_DISABLE (1<<5)
+
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 86b95ca413d1..4867f4cc0938 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
- mipi = find_section(bdb, BDB_MIPI);
+ mipi = find_section(bdb, BDB_MIPI_CONFIG);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}
/* XXX: add more info */
- dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 282de5e9f39d..83b7629e4367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,7 +104,8 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
-#define BDB_MIPI 50
+#define BDB_MIPI_CONFIG 52
+#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
-/* MIPI DSI panel info */
-struct bdb_mipi {
- u16 panel_id;
- u16 bridge_revision;
-
- /* General params */
- u32 dithering:1;
- u32 bpp_pixel_format:1;
- u32 rsvd1:1;
- u32 dphy_valid:1;
- u32 resvd2:28;
+/* Block 52 contains MIPI Panel info
+ * 6 such enteries will there. Index into correct
+ * entery is based on the panel_index in #40 LFP
+ */
+#define MAX_MIPI_CONFIGURATIONS 6
- u16 port_info;
- u16 rsvd3:2;
- u16 num_lanes:2;
- u16 rsvd4:12;
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
- /* DSI config */
- u16 virt_ch_num:2;
- u16 vtm:2;
- u16 rsvd5:12;
+struct mipi_config {
+ u16 panel_id;
- u32 dsi_clock;
+ /* General Params */
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+ u32 pwm_blc:1;
+
+ /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+ /* Bit 15:14 */
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_enabled:1;
+ u32 rsvd2:15;
+
+ /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 rsvd3:12;
+
+ u16 rsvd4;
+
+ u8 rsvd5[5];
+ u32 dsi_ddr_clk;
u32 bridge_ref_clk;
- u16 rsvd_pwr;
- /* Dphy Params */
- u32 prepare_cnt:5;
- u32 rsvd6:3;
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+
+ u8 rsvd6:6;
+
+ /* DPHY Flags */
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 rsvd7:13;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* 4 byte Dphy Params */
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
- u32 rsvd7:3;
+ u32 rsvd9:3;
u32 exit_zero_cnt:6;
- u32 rsvd8:2;
+ u32 rsvd10:2;
- u32 hl_switch_cnt;
- u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+
} __packed;
+/* Block 52 contains the MIPI configuration block:
+ * 6 mipi_config entries, followed by the 6 pps data
+ * blocks below.
+ *
+ * All delays have a unit of 100us.
+ */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+};
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+};
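
A hypothetical lookup sketch (panel_index would come from the LFP block #40, which this patch does not parse; find_section() is the helper used in intel_bios.c):

	const struct bdb_mipi_config *blk = find_section(bdb, BDB_MIPI_CONFIG);
	const struct mipi_config *cfg = &blk->config[panel_index];
	const struct mipi_pps_data *pps = &blk->pps[panel_index];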
+
+/* Block 53 contains MIPI sequences as needed by the panel
+ * for enabling it. This block is variable in size and
+ * can contain a maximum of 6 blocks.
+ */
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[0];
+};
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9864aa1ccbe8..4ef6d69c078d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
+ /* FDI must always be 2.7 GHz */
+ if (HAS_DDI(dev))
+ pipe_config->port_clock = 135000 * 2;
+
return true;
}
@@ -630,14 +639,22 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force)
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
- return connector_status_connected;
+ status = connector_status_connected;
+ goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}
- if (intel_crt_detect_ddc(connector))
- return connector_status_connected;
+ if (intel_crt_detect_ddc(connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev))
- return connector_status_disconnected;
+ if (I915_HAS_HOTPLUG(dev)) {
+ status = connector_status_disconnected;
+ goto out;
+ }
- if (!force)
- return connector->status;
+ if (!force) {
+ status = connector->status;
+ goto out;
+ }
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_unknown;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+ intel_runtime_pm_put(dev_priv);
+
return status;
}
@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
- return ret;
+ goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- return intel_crt_ddc_get_modes(connector, i2c);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static int intel_crt_set_property(struct drm_connector *connector,
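The intel_crt.c changes above all follow one idiom: resolve the port's power domain, hold a reference across any register or DDC access, and drop it on every exit path. A condensed sketch of that bracket, with do_hw_access() as a placeholder name rather than a real driver function:

/* Illustrative only; do_hw_access() is a placeholder, not a driver API. */
static int guarded_hw_access(struct drm_i915_private *dev_priv,
			     struct intel_encoder *intel_encoder)
{
	enum intel_display_power_domain power_domain;
	int ret;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	ret = do_hw_access();		/* the guarded register access */

	intel_display_power_put(dev_priv, power_domain);
	return ret;
}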
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 2643d3b8b67d..e2665e09d5df 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1145,9 +1145,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
+ enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f19e6ea36dc4..0868afbb19d2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1095,12 +1095,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
bool cur_state;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -1122,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- if (!intel_display_power_enabled(dev_priv->dev,
+ if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1188,16 +1188,16 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
- int reg, i;
+ int reg, sprite;
u32 val;
if (IS_VALLEYVIEW(dev)) {
- for (i = 0; i < INTEL_INFO(dev)->num_sprites; i++) {
- reg = SPCNTR(pipe, i);
+ for_each_sprite(pipe, sprite) {
+ reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
- sprite_name(pipe, i), pipe_name(pipe));
+ sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
@@ -2321,6 +2321,25 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool pending;
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
@@ -2331,6 +2350,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb;
int ret;
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
+
/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
@@ -2956,25 +2980,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- bool pending;
-
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
- return false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- pending = to_intel_crtc(crtc)->unpin_work != NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return pending;
-}
-
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct intel_crtc *crtc;
@@ -3953,6 +3958,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_digital_port *intel_dig_port;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_UNKNOWN:
+ /* Only DDI platforms should ever use this output type */
+ WARN_ON_ONCE(!HAS_DDI(dev));
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+ case INTEL_OUTPUT_ANALOG:
+ return POWER_DOMAIN_PORT_CRT;
+ case INTEL_OUTPUT_DSI:
+ return POWER_DOMAIN_PORT_DSI;
+ default:
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
+ unsigned long mask;
+ enum transcoder transcoder;
+
+ transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+ mask = BIT(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ if (pfit_enabled)
+ mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ mask |= BIT(intel_display_port_power_domain(intel_encoder));
+
+ return mask;
+}
+
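for_each_power_domain() is nothing more than a set-bit iterator over the domain mask built by get_crtc_power_domains(). A standalone illustration of the same macro shape, with POWER_DOMAIN_NUM reduced to a stand-in constant:

#include <stdio.h>

#define POWER_DOMAIN_NUM 32	/* stand-in for the driver enum size */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

int main(void)
{
	unsigned long mask = (1 << 3) | (1 << 7);	/* example mask */
	int domain;

	for_each_power_domain(domain, mask)
		printf("domain %d is in the mask\n", domain);	/* 3, 7 */
	return 0;
}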
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_crtc_power_domains(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
+ struct intel_crtc *crtc;
+
+ /*
+ * First get all needed power domains, then put all unneeded, to avoid
+ * any unnecessary toggling of the power wells.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
+ if (!crtc->base.enabled)
+ continue;
+
+ pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+
+ for_each_power_domain(domain, pipe_domains[crtc->pipe])
+ intel_display_power_get(dev_priv, domain);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, crtc->enabled_power_domains)
+ intel_display_power_put(dev_priv, domain);
+
+ crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+ }
+
+ intel_display_set_init_power(dev_priv, false);
+}
+
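The get-all-before-put-all ordering in modeset_update_crtc_power_domains() is what prevents a shared well from briefly powering off when a domain reference migrates between CRTCs. A toy refcount model of why the order matters (all names here are illustrative):

#include <stdio.h>

struct toy_well { int count; };

static void toy_get(struct toy_well *w)
{
	if (!w->count++)
		printf("well powered ON\n");
}

static void toy_put(struct toy_well *w)
{
	if (!--w->count)
		printf("well powered OFF\n");
}

int main(void)
{
	struct toy_well w = { .count = 1 };	/* old CRTC holds a ref */

	/* get-before-put: count 1 -> 2 -> 1, nothing is printed */
	toy_get(&w);
	toy_put(&w);

	/* put-before-get hits zero first: the well power-cycles */
	toy_put(&w);
	toy_get(&w);
	return 0;
}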
int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4113,6 +4229,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
if (req_cdclk != cur_cdclk)
valleyview_set_cdclk(dev, req_cdclk);
+ modeset_update_crtc_power_domains(dev);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -5495,6 +5612,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -6156,7 +6277,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
- return bps / (link_bw * 8) + 1;
+ return DIV_ROUND_UP(bps, link_bw * 8);
}
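The DIV_ROUND_UP() change is a real fix, not a cleanup: the old bps / (link_bw * 8) + 1 form over-allocates one lane whenever bps is an exact multiple of the per-lane bandwidth. A small standalone check with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bps = 2160000;		/* illustrative bandwidth */
	unsigned int per_lane = 270000 * 8;	/* 2.7 GHz link, 8 bits */

	/* exact multiple: old formula says 2 lanes, new one says 1 */
	printf("old: %u\n", bps / per_lane + 1);		/* 2 */
	printf("new: %u\n", DIV_ROUND_UP(bps, per_lane));	/* 1 */
	return 0;
}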
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
@@ -6812,105 +6933,9 @@ done:
mutex_unlock(&dev_priv->pc8.lock);
}
-static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (!dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = true;
- __hsw_enable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = false;
- __hsw_disable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- if ((1 << (domain)) & (mask))
-
-static unsigned long get_pipe_power_domains(struct drm_device *dev,
- enum pipe pipe, bool pfit_enabled)
-{
- unsigned long mask;
- enum transcoder transcoder;
-
- transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
-
- mask = BIT(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
- mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
-
- return mask;
-}
-
-void intel_display_set_init_power(struct drm_device *dev, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
-static void modeset_update_power_wells(struct drm_device *dev)
-{
- unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
- struct intel_crtc *crtc;
-
- /*
- * First get all needed power domains, then put all unneeded, to avoid
- * any unnecessary toggling of the power wells.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- enum intel_display_power_domain domain;
-
- if (!crtc->base.enabled)
- continue;
-
- pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
- crtc->pipe,
- crtc->config.pch_pfit.enabled);
-
- for_each_power_domain(domain, pipe_domains[crtc->pipe])
- intel_display_power_get(dev, domain);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, crtc->enabled_power_domains)
- intel_display_power_put(dev, domain);
-
- crtc->enabled_power_domains = pipe_domains[crtc->pipe];
- }
-
- intel_display_set_init_power(dev, false);
-}
-
static void haswell_modeset_global_resources(struct drm_device *dev)
{
- modeset_update_power_wells(dev);
+ modeset_update_crtc_power_domains(dev);
hsw_update_package_c8(dev);
}
@@ -6961,6 +6986,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -6986,7 +7015,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_enabled(dev,
+ if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
@@ -7014,7 +7043,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_enabled(dev, pfit_domain))
+ if (intel_display_power_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
if (IS_HASWELL(dev))
@@ -7549,7 +7578,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
if (obj->base.size < width * height * 4) {
- DRM_ERROR("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
@@ -7560,7 +7589,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
unsigned alignment;
if (obj->tiling_mode) {
- DRM_ERROR("cursor cannot be tiled\n");
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
@@ -7576,13 +7605,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to release fence for cursor");
+ DRM_DEBUG_KMS("failed to release fence for cursor");
goto fail_unpin;
}
@@ -7593,7 +7622,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
- DRM_ERROR("failed to attach phys object\n");
+ DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
@@ -7692,7 +7721,7 @@ err:
return ERR_PTR(ret);
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
@@ -8192,8 +8221,12 @@ void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- hsw_package_c8_gpu_busy(dev_priv);
+ if (dev_priv->mm.busy)
+ return;
+
+ hsw_disable_package_c8(dev_priv);
i915_update_gfx_val(dev_priv);
+ dev_priv->mm.busy = true;
}
void intel_mark_idle(struct drm_device *dev)
@@ -8201,10 +8234,13 @@ void intel_mark_idle(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- hsw_package_c8_gpu_idle(dev_priv);
+ if (!dev_priv->mm.busy)
+ return;
+
+ dev_priv->mm.busy = false;
if (!i915.powersave)
- return;
+ goto out;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -8215,6 +8251,9 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
+
+out:
+ hsw_enable_package_c8(dev_priv);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -8678,6 +8717,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ goto out_hang;
+
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -8752,6 +8794,13 @@ cleanup:
free_work:
kfree(work);
+ if (ret == -EIO) {
+out_hang:
+ intel_crtc_wait_for_pending_flips(crtc);
+ ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ if (ret == 0 && event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ }
return ret;
}
@@ -10555,10 +10604,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
+static int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
{
int aligned_height;
int pitch_limit;
@@ -10996,7 +11045,8 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i, j, ret;
+ int sprite, ret;
+ enum pipe pipe;
drm_mode_config_init(dev);
@@ -11033,13 +11083,13 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for_each_pipe(i) {
- intel_crtc_init(dev, i);
- for (j = 0; j < INTEL_INFO(dev)->num_sprites; j++) {
- ret = intel_plane_init(dev, i, j);
+ for_each_pipe(pipe) {
+ intel_crtc_init(dev, pipe);
+ for_each_sprite(pipe, sprite) {
+ ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(i), sprite_name(i, j), ret);
+ pipe_name(pipe), sprite_name(pipe, sprite), ret);
}
}
@@ -11056,7 +11106,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
+ mutex_lock(&dev->mode_config.mutex);
intel_modeset_setup_hw_state(dev, false);
+ mutex_unlock(&dev->mode_config.mutex);
}
static void
@@ -11239,11 +11291,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ i915_disable_vga(dev);
+ }
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -11251,14 +11313,10 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
- (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
- }
+ i915_redisable_vga_power_on(dev);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -11602,7 +11660,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+ intel_display_power_enabled_sw(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -11640,7 +11699,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_sw(dev,
+ intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c512d78af271..49d12d341ab2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -916,8 +916,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (clock = 0; clock <= max_clock; clock++) {
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
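Swapping the loop nesting changes which link configuration wins: with lane_count outermost, the first match uses the fewest lanes (at the lowest clock that suffices for that width), where the old clock-outer order preferred the lowest clock at the cost of extra lanes. A standalone sketch of the new search order, ignoring the channel-coding overhead that intel_dp_max_data_rate() applies:

#include <stdio.h>

int main(void)
{
	int bws[] = { 162000, 270000 };	/* link rates in kHz, illustrative */
	int max_clock = 1, max_lane_count = 4;
	int mode_rate = 500000;		/* required bandwidth, illustrative */
	int lane_count, clock;

	/* New order picks 2 lanes @ 270000 here; the old clock-outer
	 * order would have picked 4 lanes @ 162000 instead. */
	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1)
		for (clock = 0; clock <= max_clock; clock++)
			if (bws[clock] * lane_count >= mode_rate) {
				printf("picked %d lanes @ %d kHz\n",
				       lane_count, bws[clock]);
				return 0;
			}
	return 1;
}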
@@ -1333,7 +1333,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -1485,7 +1486,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp = I915_READ(intel_dp->output_reg);
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
return false;
@@ -1868,9 +1876,11 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
+ edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -3224,10 +3234,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
+ enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
intel_runtime_pm_get(dev_priv);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
@@ -3258,21 +3272,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
+ intel_display_power_put(dev_priv, power_domain);
+
intel_runtime_pm_put(dev_priv);
+
return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+ intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
@@ -3293,15 +3318,25 @@ static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
+ intel_display_power_put(dev_priv, power_domain);
+
return has_audio;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a4ffc021c317..9c7090590776 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -609,6 +609,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
+bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
@@ -732,7 +734,9 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_device *dev, bool enable);
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
@@ -871,18 +875,17 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_device *dev);
-void intel_power_domains_remove(struct drm_device *dev);
-bool intel_display_power_enabled(struct drm_device *dev,
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_remove(struct drm_i915_private *);
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_device *dev);
-void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3ee1db1407b0..cf7322e95278 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
DRM_DEBUG_KMS("\n");
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
@@ -488,8 +493,19 @@ static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+ struct intel_encoder *intel_encoder = &intel_dsi->base;
+ enum intel_display_power_domain power_domain;
+ enum drm_connector_status connector_status;
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+
DRM_DEBUG_KMS("\n");
- return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+
+ intel_display_power_get(dev_priv, power_domain);
+ connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+ intel_display_power_put(dev_priv, power_domain);
+
+ return connector_status;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 19be4bfbcc59..6b5beed28d70 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -289,7 +289,27 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
int i, j;
bool *save_enabled;
- bool any_enabled = false;
+ bool fallback = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+
+ /*
+ * If the user specified any force options, just bail here
+ * and use that config.
+ */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (!enabled[i])
+ continue;
+
+ if (connector->force != DRM_FORCE_UNSPECIFIED)
+ return false;
+ }
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -306,6 +326,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
connector->base.id);
@@ -320,6 +344,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
continue;
}
+ num_connectors_enabled++;
+
new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
/*
@@ -329,7 +355,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
*/
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
- any_enabled = false;
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ fallback = true;
goto out;
}
}
@@ -372,11 +399,25 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
encoder->crtc->base.id,
modes[i]->name);
- any_enabled = true;
+ fallback = false;
+ }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the
+ * same user experience of lighting up as much as possible like the
+ * fbdev helper library.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
}
out:
- if (!any_enabled) {
+ if (fallback) {
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
return false;
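The fallback decision above reduces to a predicate over the two counters collected in the loop. A hypothetical condensation of that heuristic, assuming num_pipes as in the driver:

/*
 * Hypothetical restatement of the heuristic above: distrust the BIOS
 * config if it lit fewer outputs than were detected while free pipes
 * remained, since the fbdev helper could light up more of them.
 */
static int should_fall_back(int enabled, int detected, int num_pipes)
{
	return enabled != detected && enabled < num_pipes;
}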
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98d68ab04de4..ceb479733991 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -667,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
@@ -820,7 +825,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (IS_G4X(dev))
+ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -874,8 +879,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
- && HAS_PCH_SPLIT(dev)) {
+ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -909,11 +914,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@@ -941,31 +950,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
+ intel_display_power_put(dev_priv, power_domain);
+
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ int ret;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector,
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ ret = intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -975,6 +1001,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
kfree(edid);
}
+ intel_display_power_put(dev_priv, power_domain);
+
return has_audio;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ac519cb46f22..312961a8472e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
- DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5bc3f6ea1014..cb058408c70e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -689,7 +689,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
freq /= 0xff;
ctl = freq << 17;
- if (IS_GEN2(dev) && panel->backlight.combination_mode)
+ if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
@@ -970,7 +970,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a6b877a4a916..ad58ce3b7675 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -294,11 +294,14 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
} else {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw */
- I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
- HSW_BYPASS_FBC_QUEUE);
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
@@ -540,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
- if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
@@ -1131,7 +1134,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
*plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
+ line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
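All four watermark hunks in this area apply the same clamp: line_time_us must be at least 1, since htotal * 1000 / clock truncates to zero for fast pixel clocks and line_time_us is used as a divisor immediately afterwards. A minimal demonstration with made-up values:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int htotal = 100, clock = 400000;	/* illustrative values */
	int latency_ns = 5000;

	/* htotal * 1000 / clock truncates to 0 for fast clocks... */
	unsigned long line_time_us = max(htotal * 1000 / clock, 1);

	/* ...and without the clamp this would divide by zero */
	int line_count = (latency_ns / line_time_us + 1000) / 1000;

	printf("line_time_us=%lu line_count=%d\n", line_time_us, line_count);
	return 0;
}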
@@ -1207,7 +1210,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
- line_time_us = (htotal * 1000) / clock;
+ line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
@@ -1440,7 +1443,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
unsigned long line_time_us;
int entries;
- line_time_us = ((htotal * 1000) / clock);
+ line_time_us = max(htotal * 1000 / clock, 1);
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1566,7 +1569,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
unsigned long line_time_us;
int entries;
- line_time_us = (htotal * 1000) / clock;
+ line_time_us = max(htotal * 1000 / clock, 1);
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -3554,6 +3557,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3603,8 +3608,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- valleyview_setup_pctx(dev);
-
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -4471,6 +4474,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -4661,6 +4666,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN6_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
ilk_init_lp_watermarks(dev);
I915_WRITE(CACHE_MODE_0,
@@ -4688,9 +4704,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
- /* Bspec says we need to always set all mask bits. */
- I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
/*
* Bspec says:
@@ -4724,11 +4740,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* The default value should be 0x200 according to docs, but the two
- * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
- I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
- I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
-
cpt_init_clock_gating(dev);
gen6_check_mch_setup(dev);
@@ -4786,7 +4797,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
static void gen8_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe i;
+ enum pipe pipe;
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -4795,6 +4806,15 @@ static void gen8_init_clock_gating(struct drm_device *dev)
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
+ /* WaDisablePartialInstShootdown:bdw */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:bdw */
+ /* FIXME: Unclear whether we really need this on production bdw. */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
/*
* This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
* pre-production hardware
@@ -4822,10 +4842,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
- for_each_pipe(i) {
- I915_WRITE(CHICKEN_PIPESL_1(i),
- I915_READ(CHICKEN_PIPESL_1(i) |
- DPRS_MASK_VBLANK_SRD));
+ for_each_pipe(pipe) {
+ I915_WRITE(CHICKEN_PIPESL_1(pipe),
+ I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ BDW_DPRS_MASK_VBLANK_SRD);
}
/* Use Force Non-Coherent whenever executing a 3D context. This is a
@@ -4841,6 +4861,24 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableSDEUnitClockGating:bdw */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}
static void haswell_init_clock_gating(struct drm_device *dev)
@@ -4871,6 +4909,17 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -4944,14 +4993,27 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen7_setup_fixed_func_scheduler(dev_priv);
- /* enable HiZ Raw Stall Optimization */
- I915_WRITE(CACHE_MODE_0_GEN7,
- _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+ }
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
@@ -5004,9 +5066,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- /* WaDisableL3CacheAging:vlv */
- I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
-
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -5167,19 +5226,16 @@ void intel_suspend_hw(struct drm_device *dev)
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
-static bool hsw_power_well_enabled(struct drm_device *dev,
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
-bool intel_display_power_enabled_sw(struct drm_device *dev,
+bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
power_domains = &dev_priv->power_domains;
@@ -5187,10 +5243,9 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
return power_domains->domain_use_count[domain];
}
-bool intel_display_power_enabled(struct drm_device *dev,
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
@@ -5205,7 +5260,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
if (power_well->always_on)
continue;
- if (!power_well->is_enabled(dev, power_well)) {
+ if (!power_well->ops->is_enabled(dev_priv, power_well)) {
is_enabled = false;
break;
}
@@ -5215,6 +5270,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
return is_enabled;
}
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -5251,10 +5312,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
}
}
+static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
+{
+ assert_spin_locked(&dev->vbl_lock);
+
+ dev->vblank[pipe].last = 0;
+}
+
static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- enum pipe p;
+ enum pipe pipe;
unsigned long irqflags;
/*
@@ -5265,16 +5333,15 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
* FIXME: Should we do this in general in drm_vblank_post_modeset?
*/
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(p)
- if (p != PIPE_A)
- dev->vblank[p].last = 0;
+ for_each_pipe(pipe)
+ if (pipe != PIPE_A)
+ reset_vblank_counter(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-static void hsw_set_power_well(struct drm_device *dev,
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
@@ -5308,35 +5375,204 @@ static void hsw_set_power_well(struct drm_device *dev,
}
}
-static void __intel_power_well_get(struct drm_device *dev,
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
- if (!power_well->count++ && power_well->set) {
- hsw_disable_package_c8(dev_priv);
- power_well->set(dev, power_well, true);
- }
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
-static void __intel_power_well_put(struct drm_device *dev,
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_disable_package_c8(dev_priv);
+ hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ hsw_set_power_well(dev_priv, power_well, false);
+ hsw_enable_package_c8(dev_priv);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
- WARN_ON(!power_well->count);
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
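vlv_set_power_well() above is an instance of a request-then-poll shape: check the status, write the control request, then spin with a bounded budget until the status catches up. A self-contained model of the same pattern, where reg_read()/reg_write() and the instant-apply hardware model are illustrative stand-ins, not driver APIs:

#include <stdio.h>

/* Illustrative stand-ins for the punit status/control registers. */
static unsigned int status_reg, ctrl_reg;

static unsigned int reg_read(unsigned int *reg) { return *reg; }
static void reg_write(unsigned int *reg, unsigned int val)
{
	*reg = val;
	status_reg = ctrl_reg;	/* model: hardware applies it instantly */
}

static int set_state_and_wait(unsigned int mask, unsigned int state)
{
	unsigned int ctrl;
	int tries;

	if ((reg_read(&status_reg) & mask) == state)
		return 0;			/* nothing to do */

	ctrl = reg_read(&ctrl_reg);
	ctrl &= ~mask;
	ctrl |= state;
	reg_write(&ctrl_reg, ctrl);		/* request the new state */

	for (tries = 0; tries < 100; tries++)	/* bounded wait */
		if ((reg_read(&status_reg) & mask) == state)
			return 0;
	return -1;				/* timed out */
}

int main(void)
{
	printf("result: %d\n", set_state_and_wait(0x3, 0x1));	/* 0 */
	return 0;
}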
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
- if (!--power_well->count && power_well->set &&
- i915.disable_power_well) {
- power_well->set(dev, power_well, false);
- hsw_enable_package_c8(dev_priv);
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization we need to defer enabling hotplug
+ * processing until fbdev is set up.
+ */
+ if (dev_priv->enable_hotplug_processing)
+ intel_hpd_init(dev_priv->dev);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_pipe(pipe)
+ __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ spin_lock_irq(&dev->vbl_lock);
+ for_each_pipe(pipe)
+ reset_vblank_counter(dev, pipe);
+ spin_unlock_irq(&dev->vbl_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
}
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+ WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
}
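
check_power_well_state() enforces one invariant: a well that is always-on, or one that is never powered down because i915.disable_power_well is 0, must read back as enabled; any other well's hardware state must track its use count. Folded into a single predicate it reads as below; the helper name is hypothetical, for illustration only.

static bool power_well_state_consistent(const struct i915_power_well *well,
					bool enabled)
{
	/* always-on wells (or disable_power_well=0) must stay enabled */
	if (well->always_on || !i915.disable_power_well)
		return enabled;

	/* otherwise the hardware state must track the use count */
	return enabled == (well->count > 0);
}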
-void intel_display_power_get(struct drm_device *dev,
+void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
@@ -5345,18 +5581,23 @@ void intel_display_power_get(struct drm_device *dev,
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains)
- __intel_power_well_get(dev, power_well);
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
power_domains->domain_use_count[domain]++;
mutex_unlock(&power_domains->lock);
}
-void intel_display_power_put(struct drm_device *dev,
+void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
@@ -5368,8 +5609,16 @@ void intel_display_power_put(struct drm_device *dev,
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
- __intel_power_well_put(dev, power_well);
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
mutex_unlock(&power_domains->lock);
}
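
With the drm_device detour gone, callers pass dev_priv directly, and the pair is used as a plain refcount bracket around any access that needs a domain powered. Typical usage (the access in the middle is illustrative):

	/* keep the wells backing PIPE_A powered across the access */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	/* ... touch registers that live in the display power well ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);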
@@ -5386,7 +5635,7 @@ void i915_request_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
- intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5400,29 +5649,99 @@ void i915_release_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
- intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
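
Each well advertises the set of domains it backs as a bitmask built from these macros, and the for_each_power_well() iterators select wells by intersecting that mask with BIT(domain). The membership test boils down to the following sketch; the helper name is hypothetical:

/* a well participates in a domain when the domain's bit is in its mask */
static bool well_covers_domain(const struct i915_power_well *well,
			       enum intel_display_power_domain domain)
{
	return (well->domains & BIT(domain)) != 0;
}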
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
},
};
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
- .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
- .is_enabled = hsw_power_well_enabled,
- .set = hsw_set_power_well,
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
},
};
@@ -5431,12 +5750,83 @@ static struct i915_power_well bdw_power_wells[] = {
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
- .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
- .is_enabled = hsw_power_well_enabled,
- .set = hsw_set_power_well,
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
};
@@ -5445,9 +5835,8 @@ static struct i915_power_well bdw_power_wells[] = {
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
-int intel_power_domains_init(struct drm_device *dev)
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
mutex_init(&power_domains->lock);
@@ -5456,12 +5845,14 @@ int intel_power_domains_init(struct drm_device *dev)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
@@ -5469,47 +5860,28 @@ int intel_power_domains_init(struct drm_device *dev)
return 0;
}
-void intel_power_domains_remove(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
hsw_pwr = NULL;
}
-static void intel_power_domains_resume(struct drm_device *dev)
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->set)
- power_well->set(dev, power_well, power_well->count > 0);
- }
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+ power_well->ops->sync_hw(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
}
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-void intel_power_domains_init_hw(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_resume(dev);
-
- if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
- return;
-
- /* We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now. */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
}
/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
@@ -5786,10 +6158,9 @@ void intel_pm_setup(struct drm_device *dev)
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ dev_priv->pc8.disable_count = 1; /* requirements_met */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b340c7587629..859092130c18 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -571,7 +571,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -1388,6 +1388,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
+ i915_cmd_parser_init_ring(ring);
+
return 0;
err_unmap:
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 08b91c6ac70a..09af92099c1b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,6 +164,38 @@ struct intel_ring_buffer {
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
+
+ /*
+ * Tables of commands the command parser needs to know about
+ * for this ring.
+ */
+ const struct drm_i915_cmd_table *cmd_tables;
+ int cmd_table_count;
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const u32 *reg_table;
+ int reg_count;
+
+ /*
+ * Table of registers allowed in commands that read/write registers, but
+ * only from the DRM master.
+ */
+ const u32 *master_reg_table;
+ int master_reg_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the ring's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-ring length field
+ * encoding for the command (i.e. certain opcode ranges use certain bits
+ * to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
};
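
When a command is not found in cmd_tables, the parser falls back to this callback to learn which header bits encode the command's length. A hypothetical per-ring implementation might look like the following; the opcode ranges and masks are invented for illustration and do not describe any real engine:

static u32 example_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> 29;	/* top bits select the command client */

	switch (client) {
	case 0x0:	/* e.g. MI commands: length in bits 5:0 */
		return 0x3F;
	case 0x3:	/* e.g. render commands: length in bits 7:0 */
		return 0xFF;
	default:	/* unrecognized: returning 0 rejects the command */
		return 0;
	}
}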
static inline bool
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c62841404c82..7861d97600e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -40,6 +40,12 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
+}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
@@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_thread_c0(dev_priv);
}
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
u32 forcewake_ack;
@@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
+
+ if (IS_GEN7(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (FORCEWAKE_RENDER & fw_engine) {
- if (dev_priv->uncore.fw_rendercount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_RENDER);
- }
- if (FORCEWAKE_MEDIA & fw_engine) {
- if (dev_priv->uncore.fw_mediacount++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- FORCEWAKE_MEDIA);
- }
+
+ if (fw_engine & FORCEWAKE_RENDER &&
+ dev_priv->uncore.fw_rendercount++ != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
+ if (fw_engine & FORCEWAKE_MEDIA &&
+ dev_priv->uncore.fw_mediacount++ != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
+
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -272,46 +280,45 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (FORCEWAKE_RENDER & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_rendercount == 0);
- if (--dev_priv->uncore.fw_rendercount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_RENDER);
- }
+ if (fw_engine & FORCEWAKE_RENDER &&
+ --dev_priv->uncore.fw_rendercount != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
+ if (fw_engine & FORCEWAKE_MEDIA &&
+ --dev_priv->uncore.fw_mediacount != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
- if (FORCEWAKE_MEDIA & fw_engine) {
- WARN_ON(dev_priv->uncore.fw_mediacount == 0);
- if (--dev_priv->uncore.fw_mediacount == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv,
- FORCEWAKE_MEDIA);
- }
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void gen6_force_wake_work(struct work_struct *work)
+static void gen6_force_wake_timer(unsigned long arg)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+ struct drm_i915_private *dev_priv = (void *)arg;
unsigned long irqflags;
+ assert_device_not_suspended(dev_priv);
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev))
vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
+ __gen7_gt_force_wake_mt_reset(dev_priv);
}
void intel_uncore_early_sanitize(struct drm_device *dev)
@@ -354,7 +361,9 @@ void intel_uncore_sanitize(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
- if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+ if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
+ PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
+ PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -393,25 +402,38 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
+ bool delayed = false;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
/* Redirect to VLV specific routine */
- if (IS_VALLEYVIEW(dev_priv->dev))
- return vlv_force_wake_put(dev_priv, fw_engine);
+ if (IS_VALLEYVIEW(dev_priv->dev)) {
+ vlv_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
- mod_delayed_work(dev_priv->wq,
- &dev_priv->uncore.force_wake_work,
- 1);
+ delayed = true;
+ mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
+ jiffies + 1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- intel_runtime_pm_put(dev_priv);
+out:
+ if (!delayed)
+ intel_runtime_pm_put(dev_priv);
+}
+
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
/* We give fast paths for the really cool registers */
@@ -446,16 +468,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
}
}
-static void
-assert_device_not_suspended(struct drm_i915_private *dev_priv)
-{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
-}
-
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define REG_READ_FOOTER \
@@ -484,17 +500,15 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
+ if (dev_priv->uncore.forcewake_count == 0 && \
+ NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
+ dev_priv->uncore.forcewake_count++; \
+ mod_timer_pinned(&dev_priv->uncore.force_wake_timer, \
+ jiffies + 1); \
} \
+ val = __raw_i915_read##x(dev_priv, reg); \
REG_READ_FOOTER; \
}
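
Unrolled for a single width, the rewritten gen6 read path amounts to the sketch below: only the first access in a burst takes forcewake, the count is bumped so the scoped put disappears from the hot path, and the pinned one-jiffy timer drops the reference once the burst ends. Locking and tracing from REG_READ_HEADER/FOOTER are elided and the function name is illustrative.

static u32 gen6_read32_sketch(struct drm_i915_private *dev_priv, off_t reg)
{
	if (dev_priv->uncore.forcewake_count == 0 &&
	    NEEDS_FORCE_WAKE(dev_priv, reg)) {
		/* first access in a burst: take forcewake and hand the
		 * reference to the one-jiffy timer to drop */
		dev_priv->uncore.funcs.force_wake_get(dev_priv,
						      FORCEWAKE_ALL);
		dev_priv->uncore.forcewake_count++;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}

	return __raw_i915_read32(dev_priv, reg);
}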
@@ -502,27 +516,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
- unsigned *fwcount; \
REG_READ_HEADER(x); \
- if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
- fwengine = FORCEWAKE_RENDER; \
- fwcount = &dev_priv->uncore.fw_rendercount; \
- } \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
- fwengine = FORCEWAKE_MEDIA; \
- fwcount = &dev_priv->uncore.fw_mediacount; \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
} \
- if (fwengine != 0) { \
- if ((*fwcount)++ == 0) \
- (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
- fwengine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- if (--(*fwcount) == 0) \
- (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
- fwengine); \
- } else { \
- val = __raw_i915_read##x(dev_priv, reg); \
- } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
}
@@ -554,6 +560,7 @@ __gen4_read(64)
#define REG_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define REG_WRITE_FOOTER \
@@ -584,7 +591,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
@@ -600,7 +606,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
@@ -634,16 +639,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
- if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv, \
- FORCEWAKE_ALL); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_put(dev_priv, \
- FORCEWAKE_ALL); \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
+ } else { \
+ __raw_i915_write##x(dev_priv, reg, val); \
} \
REG_WRITE_FOOTER; \
}
@@ -681,15 +687,15 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
- gen6_force_wake_work);
+ setup_timer(&dev_priv->uncore.force_wake_timer,
+ gen6_force_wake_timer, (unsigned long)dev_priv);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
- dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
@@ -703,16 +709,16 @@ void intel_uncore_init(struct drm_device *dev)
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
- __gen6_gt_force_wake_mt_get;
+ __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
- __gen6_gt_force_wake_mt_put;
+ __gen7_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
@@ -794,10 +800,11 @@ void intel_uncore_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- flush_delayed_work(&dev_priv->uncore.force_wake_work);
+ del_timer_sync(&dev_priv->uncore.force_wake_timer);
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev);
+ intel_uncore_forcewake_reset(dev);
}
static const struct register_whitelist {
@@ -947,6 +954,7 @@ static int gen6_do_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
unsigned long irqflags;
+ u32 fw_engine = 0;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
@@ -966,14 +974,25 @@ static int gen6_do_reset(struct drm_device *dev)
intel_uncore_forcewake_reset(dev);
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->uncore.forcewake_count)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- else
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ /* If reset with a user forcewake, try to restore */
+ if (IS_VALLEYVIEW(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw_engine |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw_engine |= FORCEWAKE_MEDIA;
+ } else {
+ if (dev_priv->uncore.forcewake_count)
+ fw_engine = FORCEWAKE_ALL;
+ }
- /* Restore fifo count */
- dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 0d19f4f94d5a..daa4dd375ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1774,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE41(rdev)) {
+ /* Don't share PLLs on DCE4.1 chips */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ }
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
@@ -1801,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ } else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
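
The new DCE4.1 branch is a first-free scan over the PLL use mask returned by radeon_get_pll_use_mask(). Generalized to an arbitrary candidate list it reduces to the sketch below; pick_first_free_pll() is a hypothetical helper:

static int pick_first_free_pll(u32 pll_in_use)
{
	static const int candidates[] = { ATOM_PPLL1, ATOM_PPLL2 };
	int i;

	for (i = 0; i < ARRAY_SIZE(candidates); i++)
		if (!(pll_in_use & (1 << candidates[i])))
			return candidates[i];

	return ATOM_PPLL_INVALID;	/* every candidate already claimed */
}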
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2cec2ab02f80..607dc14d195e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 92e38b54efb9..0ae991d3289a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3047,7 +3047,7 @@ static u32 cik_create_bitmask(u32 bit_width)
}
/**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
*
* @rdev: radeon_device pointer
* @max_rb_num: max RBs (render backends) for the asic
@@ -4133,8 +4133,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
WREG32(CP_MEC_CNTL, 0);
- else
+ else {
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
udelay(50);
}
@@ -7949,7 +7952,8 @@ int cik_resume(struct radeon_device *rdev)
/* init golden registers */
cik_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cik_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 00150ac49cd2..89b4afa5041c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
}
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
+ if (!enable) {
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ }
+
for (i = 0; i < 2; i++) {
if (i == 0)
reg_offset = SDMA0_REGISTER_OFFSET;
@@ -418,10 +425,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
if (!rdev->sdma_fw)
return -EINVAL;
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
/* halt the MEs */
cik_sdma_enable(rdev, false);
@@ -490,9 +493,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
*/
void cik_sdma_fini(struct radeon_device *rdev)
{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
/* halt the MEs */
cik_sdma_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d359901..94e858751994 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- AUDIO_ENABLED);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ enable ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio; it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b1f1253e2ced..b406546440da 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5295,7 +5295,8 @@ int evergreen_resume(struct radeon_device *rdev)
/* init golden registers */
evergreen_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = evergreen_startup(rdev);
@@ -5471,9 +5472,9 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5cef4cf1..05b0c95813fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8cfe902..3a03ba37d043 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 85168ecd216b..d246e043421a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2103,7 +2103,8 @@ int cayman_resume(struct radeon_device *rdev)
/* init golden registers */
ni_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0a894aee7406..030f8e49c5ee 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3939,8 +3939,6 @@ int r100_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 41cdf236ee9a..206caf9700b7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab2710b..802b19220a21 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb75024f..98d6053c36c6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0f4ab928a15a..6e887d004eba 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2964,7 +2964,8 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = r600_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b886979..bffac10c4296 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio; it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 3016fc14f502..85a2bb28aed2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 111deab2492a..f21db7a0b34d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2833,6 +2833,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f889f5..fa9a9c02751e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u\n", output.version);
+ printk("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7db44de90d6c..2e72dcd94b13 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1517,13 +1517,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 37b1deacb5b1..6f1dfac17507 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- if (radeon_runtime_pm != 0) {
+ if ((radeon_runtime_pm == 1) ||
+ ((radeon_runtime_pm == -1) && radeon_is_px())) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -567,6 +575,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
return r;
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r)
+ return r;
+
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2db486666d91..c8a8a5144ec1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -722,6 +722,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+ /* Change the size here instead of the init above so only lpfn is affected */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
@@ -942,7 +945,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
while (size) {
loff_t p = *pos / PAGE_SIZE;
unsigned off = *pos & ~PAGE_MASK;
- ssize_t cur_size = min(size, PAGE_SIZE - off);
+ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
struct page *page;
void *ptr;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6a2e3ff68374..5748bdaeacce 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
release_firmware(rdev->uvd_fw);
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369cda2f..130d5cc50d43 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde7693032..72d3616de08e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 35950738bd5e..3462b64369bf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138ff779..237dd29d9f1c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6c772e58c784..fef310773aad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
/* init golden registers */
rv770_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = rv770_startup(rdev);
@@ -1955,9 +1956,9 @@ void rv770_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8008cb8d5324..d589475fe9e6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6610,7 +6610,8 @@ int si_resume(struct radeon_device *rdev)
/* init golden registers */
si_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = si_startup(rdev);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 88a529008ce0..c71594754f46 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
static void tegra_drm_lastclose(struct drm_device *drm)
{
-#ifdef CONFIG_TEGRA_DRM_FBDEV
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 338f7f6561d7..0266fb40479e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (rgb->enabled)
+ return 0;
+
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ rgb->enabled = true;
+
return 0;
}
@@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (!rgb->enabled)
+ return 0;
+
value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
@@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ rgb->enabled = false;
+
return 0;
}
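
The new enabled flag makes the enable/disable pair idempotent, so repeated DPMS or hotplug calls stop re-programming the display controller. The guard pattern in isolation, as a sketch with a hypothetical name:

static int output_enable_once(struct tegra_rgb *rgb)
{
	if (rgb->enabled)
		return 0;	/* already enabled: nothing to re-program */

	/* ... program the display controller ... */

	rgb->enabled = true;
	return 0;
}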
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 79238d2face9..9df79ac7b8f5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c9ae48..0ce48e5a9cb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_private_data = bo;
/*
- * PFNMAP is faster than MIXEDMAP due to reduced page
- * administration. So use MIXEDMAP only if private VMA, where
- * we need to support COW.
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
*/
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index bb594c11605e..f58dc7dd15c5 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats. */
SVGA3D_YV12 = 121,
- /* Shader constant formats. */
- SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
- SVGA3D_SURFACE_SHADERCONST_INT = 123,
- SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
-
- SVGA3D_FORMAT_MAX = 125,
+ SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 9e4be1725985..07831554dad7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,7 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20121114"
+#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d4a5a19cb8c3..04a64b8cd3cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -188,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable "
+ "takedown.\n");
+ } else {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
if (bo) {
int ret;
@@ -562,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
+ } else {
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2aa4bc6a4d60..9757b57f8388 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -427,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- (user) ? ttm_bo_type_device :
- ttm_bo_type_kernel, placement,
+ ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 82468d902915..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_unlock;
+ /*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+ if (dev_priv->has_mob && req->shareable) {
+ uint32_t backup_handle;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,