Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 108 +++++++++++++-------
 1 file changed, 79 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 02c4eb28cef4..4ca6fb30743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -27,6 +27,7 @@
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/gsp.h>
#include <subdev/mmu.h>

struct nv50_instmem {
@@ -64,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)

spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
@@ -84,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)

spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
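
Both slow-path accessors above funnel VRAM accesses through the NV50 PRAMIN window: a 1MiB aperture at BAR0 offset 0x700000 whose backing VRAM address is selected by programming register 0x001700 (the patch routes that register write through the new set_bar0_window_addr hook). A minimal standalone sketch of the idiom, with the locking and the cached window base omitted and the helper name invented for illustration:

    /* Sketch: read a 32-bit word at an arbitrary VRAM offset through
     * the sliding BAR0 window. 0x001700 takes the window base in units
     * of 64KiB (hence the >> 16); the window itself spans 1MiB.
     */
    static u32
    pramin_peek(struct nvkm_device *device, u64 offset)
    {
            u64 base = offset & ~0xfffffULL; /* 1MiB-aligned window base */
            u64 addr = offset &  0xfffffULL; /* offset inside the window */

            nvkm_wr32(device, 0x001700, base >> 16);   /* move the window */
            return nvkm_rd32(device, 0x700000 + addr); /* read through it */
    }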
@@ -133,12 +134,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
* into it. The lock has to be dropped while doing this due
* to the possibility of recursion for page table allocation.
*/
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
/* Evict unused mappings, and keep retrying until we either
* succeed, or there's no more objects left on the LRU.
*/
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
if (eobj) {
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +152,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
emap = eobj->map;
eobj->map = NULL;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
if (!eobj)
break;
iounmap(emap);
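
The two hunks above implement an evict-and-retry loop: BAR2 address space is requested with the instmem mutex dropped, and each failure evicts the least-recently-used mapping (its iounmap() happening outside the lock, since it may sleep) before retrying, until either the allocation succeeds or the LRU runs dry. Stripped to its shape, with hypothetical helpers:

    /* Sketch of the retry loop: evict one LRU victim per failed
     * attempt; give up only when there is nothing left to evict.
     */
    while ((ret = try_alloc(&bar)) != 0) {
            mutex_lock(&lock);
            victim = take_lru_oldest(&lru);  /* detach under the lock */
            mutex_unlock(&lock);
            if (!victim)
                    break;                   /* LRU exhausted: ret != 0 */
            unmap(victim);                   /* may sleep: done unlocked */
    }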
@@ -160,18 +161,18 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
if (ret == 0)
ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);

- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
nvkm_vmm_put(vmm, &bar);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
return;
}

/* Make the mapping visible to the host. */
iobj->bar = bar;
- iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
@@ -197,7 +198,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
wmb();
nvkm_bar_flush(subdev->device->bar);

- if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
/* Add the now-unused mapping to the LRU instead of directly
* unmapping it here, in case we need to map it again later.
*/
@@ -208,7 +209,7 @@ nv50_instobj_release(struct nvkm_memory *memory)

/* Switch back to NULL accessors when last map is gone. */
iobj->base.memory.ptrs = NULL;
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
}
}
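
The release path above leans on refcount_dec_and_mutex_lock() from <linux/refcount.h>: the decrement stays lock-free until the count would hit zero, at which point the mutex is taken and the function returns true, so the last-unmap bookkeeping runs exactly once, under the lock. The general idiom, with hypothetical names:

    /* Sketch: only the thread dropping the final reference takes the
     * mutex and performs teardown; all other droppers stay lock-free.
     */
    static void put_obj(struct obj *o, struct mutex *lock)
    {
            if (refcount_dec_and_mutex_lock(&o->refs, lock)) {
                    retire(o);           /* count is zero, lock is held */
                    mutex_unlock(lock);
            }
    }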
@@ -221,15 +222,18 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;

/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* Read barrier matches the wmb() on refcount set. */
+ smp_rmb();
return iobj->map;
+ }

/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return iobj->map;
}
@@ -249,10 +253,12 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}

- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return map;
}
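
The smp_wmb()/smp_rmb() pair added in this function is the classic publish/consume pattern: the mapping and its accessor ptrs must be globally visible before refcount_set() publishes the object, and a reader that wins the lock-free refcount_inc_not_zero() fast path must order its subsequent loads after the refcount load. Reduced to its essentials, with illustrative names:

    /* Writer (under the mutex): initialise, then publish. */
    obj->map  = map;
    obj->ptrs = &accessors;
    smp_wmb();                      /* init stores before the publish */
    refcount_set(&obj->maps, 1);

    /* Reader (no lock held): */
    if (refcount_inc_not_zero(&obj->maps)) {
            smp_rmb();              /* pairs with the writer's smp_wmb() */
            use(obj->map);          /* sees fully initialised state */
    }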
@@ -265,7 +271,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
/* Exclude bootstrapped objects (ie. the page tables for the
* instmem BAR itself) from eviction.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
list_del_init(&iobj->lru);
iobj->lru.next = NULL;
@@ -273,7 +279,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)

nv50_instobj_kmap(iobj, vmm);
nvkm_instmem_boot(imem);
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
}

static u64
@@ -313,14 +319,14 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nvkm_instmem *imem = &iobj->imem->base;
struct nvkm_vma *bar;
- void *map = map;
+ void *map;

- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);

if (map) {
struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
@@ -347,14 +353,12 @@ nv50_instobj_func = {
.map = nv50_instobj_map,
};

-static int
-nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
- struct nvkm_memory **pmemory)
+int
+nv50_instobj_wrap(struct nvkm_instmem *base,
+ struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
struct nv50_instmem *imem = nv50_instmem(base);
struct nv50_instobj *iobj;
- struct nvkm_device *device = imem->base.subdev.device;
- u8 page = max(order_base_2(align), 12);

if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
@@ -365,7 +369,25 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
refcount_set(&iobj->maps, 0);
INIT_LIST_HEAD(&iobj->lru);

- return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
+ iobj->ram = nvkm_memory_ref(memory);
+ return 0;
+}
+
+int
+nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
+{
+ u8 page = max(order_base_2(align), 12);
+ struct nvkm_memory *ram;
+ int ret;
+
+ ret = nvkm_ram_get(imem->subdev.device, 0, 1, page, size, true, true, &ram);
+ if (ret)
+ return ret;
+
+ ret = nv50_instobj_wrap(imem, ram, pmemory);
+ nvkm_memory_unref(&ram);
+ return ret;
}
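
The refactor above separates allocation from wrapping: nv50_instobj_wrap() turns any existing nvkm_memory into an instobj (taking its own reference via nvkm_memory_ref()), while nv50_instobj_new() keeps the old behaviour by allocating VRAM first and then wrapping it. This is what lets the GSP-RM backend hand over memory it obtained elsewhere. A caller's view might look like this (a sketch, assuming the new memory_wrap hook mirrors nv50_instobj_wrap()'s signature):

    struct nvkm_memory *instobj;
    int ret;

    /* Wrap pre-allocated memory as an instobj. wrap() references the
     * memory itself, so the caller may drop its own reference after.
     */
    ret = imem->func->memory_wrap(imem, memory, &instobj);
    if (ret == 0)
            nvkm_memory_unref(&memory);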

/******************************************************************************
@@ -373,28 +395,56 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
*****************************************************************************/
static void
+nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, 0x001700, addr >> 16);
+}
+
+void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
}

+static void *
+nv50_instmem_dtor(struct nvkm_instmem *base)
+{
+ return nv50_instmem(base);
+}
+
static const struct nvkm_instmem_func
nv50_instmem = {
+ .dtor = nv50_instmem_dtor,
.fini = nv50_instmem_fini,
+ .suspend = nv04_instmem_suspend,
+ .resume = nv04_instmem_resume,
.memory_new = nv50_instobj_new,
+ .memory_wrap = nv50_instobj_wrap,
.zero = false,
+ .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
};

int
-nv50_instmem_new(struct nvkm_device *device, int index,
- struct nvkm_instmem **pimem)
+nv50_instmem_new_(const struct nvkm_instmem_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
{
struct nv50_instmem *imem;

if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(func, device, type, inst, &imem->base);
INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
}
+
+int
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ if (nvkm_gsp_rm(device->gsp))
+ return r535_instmem_new(&nv50_instmem, device, type, inst, pimem);
+
+ return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem);
+}
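
Finally, the constructor is split in the usual nvkm style: nv50_instmem_new_() is the reusable backend constructor taking an explicit func table, while nv50_instmem_new() is the chip entry point, which defers to the r535 (GSP-RM) wrapper whenever firmware owns the hardware. A later chip variant could reuse the shared constructor with its own table, e.g. (hypothetical, for illustration only; not part of the patch):

    static const struct nvkm_instmem_func
    gxx_instmem = {                       /* hypothetical chip variant */
            .dtor = nv50_instmem_dtor,
            .fini = nv50_instmem_fini,
            .memory_new = nv50_instobj_new,
            .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
    };

    int
    gxx_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type,
                    int inst, struct nvkm_instmem **pimem)
    {
            return nv50_instmem_new_(&gxx_instmem, device, type, inst, pimem);
    }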