path: root/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 15:49:32 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 15:49:32 -0700
commit    f377ea88b862bf7151be96d276f4cb740f8e1c41 (patch)
tree      6205913431c012e285316281b6221a20d4a92128 /drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
parent    51e771c0d25b43d0f12b2c7c01939942becbbe28 (diff)
parent    73bf1b7be7aab60d7c651402441dd0b0b4991098 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main pull request for the drm for 4.3. Nouveau is probably
  the biggest amount of changes in here, since it missed 4.2. Highlights
  below, along with the usual bunch of fixes.

  All stuff outside drm should have applicable acks.

  Highlights:

   - new drivers:
       freescale dcu kms driver

   - core:
       more atomic fixes
       disable some dri1 interfaces on kms drivers
       drop fb panic handling, this was just getting more broken, as more locking was required.
       new core fbdev Kconfig support - instead of each driver enable/disabling it
       struct_mutex cleanups

   - panel:
       more new panels
       cleanup Kconfig

   - i915:
       Skylake support enabled by default
       legacy modesetting using atomic infrastructure
       Skylake fixes
       GEN9 workarounds

   - amdgpu:
       Fiji support
       CGS support for amdgpu
       Initial GPU scheduler - off by default
       Lots of bug fixes and optimisations.

   - radeon:
       DP fixes
       misc fixes

   - amdkfd:
       Add Carrizo support for amdkfd using amdgpu.

   - nouveau:
       long pending cleanup to complete driver, fully bisectable which makes it larger
       perfmon work
       more reclocking improvements
       maxwell displayport fixes

   - vmwgfx:
       new DX device support, supports OpenGL 3.3
       screen targets support

   - mgag200:
       G200eW support
       G200e new revision support

   - msm:
       dragonboard 410c support, msm8x94 support, msm8x74v1 support
       yuv format support
       dma plane support
       mdp5 rotation
       initial hdcp

   - sti:
       atomic support

   - exynos:
       lots of cleanups
       atomic modesetting/pageflipping support
       render node support

   - tegra:
       tegra210 support (dc, dsi, dp/hdmi)
       dpms with atomic modesetting support

   - atmel:
       support for 3 more atmel SoCs
       new input formats, PRIME support.

   - dwhdmi:
       preparing to add audio support

   - rockchip:
       yuv plane support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1369 commits)
  drm/amdgpu: rename gmc_v8_0_init_compute_vmid
  drm/amdgpu: fix vce3 instance handling
  drm/amdgpu: remove ib test for the second VCE Ring
  drm/amdgpu: properly enable VM fault interrupts
  drm/amdgpu: fix warning in scheduler
  drm/amdgpu: fix buffer placement under memory pressure
  drm/amdgpu/cz: fix cz_dpm_update_low_memory_pstate logic
  drm/amdgpu: fix typo in dce11 watermark setup
  drm/amdgpu: fix typo in dce10 watermark setup
  drm/amdgpu: use top down allocation for non-CPU accessible vram
  drm/amdgpu: be explicit about cpu vram access for driver BOs (v2)
  drm/amdgpu: set MEC doorbell range for Fiji
  drm/amdgpu: implement burst NOP for SDMA
  drm/amdgpu: add insert_nop ring func and default implementation
  drm/amdgpu: add amdgpu_get_sdma_instance helper function
  drm/amdgpu: add AMDGPU_MAX_SDMA_INSTANCES
  drm/amdgpu: add burst_nop flag for sdma
  drm/amdgpu: add count field for the SDMA NOP packet v2
  drm/amdgpu: use PT for VM sync on unmap
  drm/amdgpu: make wait_event uninterruptible in push_job
  ...
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 145
1 file changed, 112 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
+struct vmw_temp_set_context {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTempSetContext body;
+};
+
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
- /* Non-Screen Object path does not support surfaces */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support surfaces */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
- uint32_t dummy;
+ fifo->dx = false;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
- /*
- * Allow mapping the first page read-only to user-space.
- */
-
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+ SVGA_REG_ENABLE_HIDE);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
- return vmw_fifo_send_fence(dev_priv, &dummy);
+
+ return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
* Returns:
* Pointer to the fifo, or null on error (possible hardware hang).
*/
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+ uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
- return fifo_mem + (next_cmd >> 2);
+ return (void __force *) (fifo_mem +
+ (next_cmd >> 2));
} else {
need_bounce = true;
}
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
+
return NULL;
}
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+ int ctx_id)
+{
+ void *ret;
+
+ if (dev_priv->cman)
+ ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+ ctx_id, false, NULL);
+ else if (ctx_id == SVGA3D_INVALID_ID)
+ ret = vmw_local_fifo_reserve(dev_priv, bytes);
+ else {
+ WARN_ON("Command buffer has not been allocated.\n");
+ ret = NULL;
+ }
+ if (IS_ERR_OR_NULL(ret)) {
+ DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+ (unsigned) bytes);
+ dump_stack();
+ return NULL;
+ }
+
+ return ret;
+}
+
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
}
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ if (fifo_state->dx)
+ bytes += sizeof(struct vmw_temp_set_context);
+
+ fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+ might_sleep();
+
+ if (dev_priv->cman)
+ return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+ else
+ return 0;
+}
+
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
- void *fm;
+ u32 *fm;
int ret = 0;
- uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+ uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
return 0;
}
- *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
- cmd_fence = (struct svga_fifo_cmd_fence *)
- ((unsigned long)fm + sizeof(__le32));
-
- iowrite32(*seqno, &cmd_fence->fence);
- vmw_fifo_commit(dev_priv, bytes);
+ *fm++ = SVGA_CMD_FENCE;
+ cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+ cmd_fence->fence = *seqno;
+ vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
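
For orientation, a minimal sketch of how a vmwgfx-internal caller uses the FIFO
reserve/commit interface after this patch. The function name
vmw_emit_example_fence and the caller-supplied seqno are hypothetical;
vmw_fifo_reserve_dx(), vmw_fifo_commit_flush(), SVGA_CMD_FENCE,
SVGA3D_INVALID_ID and struct svga_fifo_cmd_fence are the symbols added or kept
by the diff above. When dev_priv->cman is present the reservation is routed to
the command-buffer manager, otherwise it falls back to the local FIFO, so the
caller's code path is the same either way.

	/*
	 * Hypothetical example, not part of the patch: emit a FENCE command
	 * through the reserve/commit pair.  vmw_fifo_reserve() is now a thin
	 * wrapper around vmw_fifo_reserve_dx() with SVGA3D_INVALID_ID, so
	 * legacy callers are unchanged.
	 */
	static int vmw_emit_example_fence(struct vmw_private *dev_priv,
					  uint32_t seqno)
	{
		/* One 32-bit command id followed by the fence payload. */
		uint32_t bytes = sizeof(u32) + sizeof(struct svga_fifo_cmd_fence);
		struct svga_fifo_cmd_fence *cmd_fence;
		u32 *fm;

		fm = vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
		if (unlikely(fm == NULL))
			return -ENOMEM;	/* reserve failed and already logged */

		*fm++ = SVGA_CMD_FENCE;
		cmd_fence = (struct svga_fifo_cmd_fence *) fm;
		cmd_fence->fence = seqno;

		/* Commit and make sure buffered commands start processing. */
		vmw_fifo_commit_flush(dev_priv, bytes);
		return 0;
	}

The same pattern with vmw_fifo_commit() instead of vmw_fifo_commit_flush()
applies when immediate processing of buffered commands is not required.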