Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r-- | drivers/gpu/drm/nouveau/dispnv50/wndw.c        |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_abi16.c        |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c           | 40
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_chan.c         |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_fbcon.c        |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_fence.c        |  4
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_gem.c          | 20
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_mem.c          | 11
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_mem.h          | 15
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_ttm.c          | 32
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_vmm.c          |  4
-rw-r--r-- | drivers/gpu/drm/nouveau/nv17_fence.c           |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nv50_fence.c           |  2
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c |  6
14 files changed, 73 insertions, 71 deletions
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3..8d048bacd6f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 		asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0a9334deffe2..b45ec3086285 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 					NOUVEAU_GEM_DOMAIN_GART;
 	else
-	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c390f24f25f3..520b1ea9d16c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 	if (nvbo->bo.pin_count) {
 		bool error = evict;
 
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 			break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				      bo->mem.mem_type, domain);
+				      bo->resource->mem_type, domain);
 			ret = -EBUSY;
 		}
 		ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
 	ttm_bo_pin(&nvbo->bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		drm->gem.vram_available -= bo->base.size;
 		break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	ttm_bo_unpin(&nvbo->bo);
 	if (!nvbo->bo.pin_count) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			drm->gem.vram_available += bo->base.size;
 			break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
 
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 					 NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_resource *reg)
 {
-	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
 	struct nouveau_mem *new_mem = nouveau_mem(reg);
 	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
 		if (ret == 0) {
 			ret = nouveau_fence_new(chan, false, &fence);
 			if (ret == 0) {
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 
 }
 
@@ -955,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -969,7 +965,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_resource *old_reg = &bo->mem;
+	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
@@ -1009,7 +1005,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_reg->mem_type == TTM_PL_TT &&
 	    new_reg->mem_type == TTM_PL_SYSTEM) {
 		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_reg);
 		goto out;
 	}
@@ -1045,7 +1041,7 @@ out:
 	}
 out_ntfy:
 	if (ret) {
-		nouveau_bo_move_ntfy(bo, &bo->mem);
+		nouveau_bo_move_ntfy(bo, bo->resource);
 	}
 	return ret;
 }
@@ -1170,7 +1166,7 @@ out:
 			list_del_init(&nvbo->io_reserve_lru);
 			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 					   bdev->dev_mapping);
-			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
 			goto retry;
 		}
 
@@ -1200,12 +1196,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
-	if (bo->mem.mem_type != TTM_PL_VRAM) {
+	if (bo->resource->mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 		    !nvbo->kind)
 			return 0;
 
-		if (bo->mem.mem_type != TTM_PL_SYSTEM)
+		if (bo->resource->mem_type != TTM_PL_SYSTEM)
 			return 0;
 
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1213,7 +1209,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	} else {
 		/* make sure bo is in mappable vram */
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-		    bo->mem.start + bo->mem.num_pages < mappable)
+		    bo->resource->start + bo->resource->num_pages < mappable)
 			return 0;
 
 		for (i = 0; i < nvbo->placement.num_placement; ++i) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7cfac265fd45..40362600eed2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.start = 0;
 			args.limit = chan->vmm->vmm.limit - 1;
 		} else
-		if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+		if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 			if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
 				/* nv04 vram pushbuf hack, retarget to its location in
 				 * the framebuffer bar rather than direct vram access..
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 93ac78bda750..4f9b3aa5deda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 		      FBINFO_HWACCEL_FILLRECT |
 		      FBINFO_HWACCEL_IMAGEBLIT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = nvbo->bo.mem.bus.offset;
+	info->fix.smem_start = nvbo->bo.resource->bus.offset;
 	info->fix.smem_len = nvbo->bo.base.size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..6b43918035df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		return ret;
 	}
 
-	fobj = dma_resv_get_list(resv);
-	fence = dma_resv_get_excl(resv);
+	fobj = dma_resv_shared_list(resv);
+	fence = dma_resv_excl_fence(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 722e1decc202..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-	    bo->mem.mem_type == TTM_PL_VRAM)
+	    bo->resource->mem_type == TTM_PL_VRAM)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		 bo->mem.mem_type == TTM_PL_TT)
+		 bo->resource->mem_type == TTM_PL_TT)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 			if (nvbo->offset == b->presumed.offset &&
-			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 				continue;
 
-			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 			else
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		}
 
 		if (!nvbo->kmap.virtual) {
-			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 					  &nvbo->kmap);
 			if (ret) {
 				NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -870,7 +870,7 @@ revalidate:
 		if (unlikely(cmd != req->suffix0)) {
 			if (!nvbo->kmap.virtual) {
 				ret = ttm_bo_kmap(&nvbo->bo, 0,
-						  nvbo->bo.mem.
+						  nvbo->bo.resource->
 						  num_pages,
 						  &nvbo->kmap);
 				if (ret) {
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-					   no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee1..0de6549fb875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
+
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
-	reg->mm_node = mem;
+
+	*res = &mem->base;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 7df3848e85aa..2c01166a90f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,13 +6,8 @@ struct ttm_tt;
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
+	struct ttm_resource base;
 	struct nouveau_cli *cli;
 	u8 kind;
 	u8 comp;
@@ -20,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 65430912ff72..f4c2e46b6fe1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -26,6 +26,8 @@
 #include <linux/limits.h>
 #include <linux/swiotlb.h>
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
@@ -43,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -52,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ttm_resource_init(bo, place, *res);
+
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
@@ -74,17 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
 
@@ -97,26 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a49e88129c92..67d6619fcd5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -77,7 +77,7 @@ int
 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 		struct nouveau_vma **pvma)
 {
-	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
 	struct nouveau_vma *vma;
 	struct nvif_vma tmp;
 	int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
-	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
 				   mem->mem.size, &tmp);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index b1cd8d7dd87d..07c2e0878c24 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -77,8 +77,8 @@ static int
 nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 1625826505f6..ea1e1f480bfe 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index 58db83ebadc5..a96084b34a78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
 		nvkm_wr32(device, 0xb81610, 0x6);
 }
 
-void
+static void
 tu102_mc_intr_unarm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);
@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_rearm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);
@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
 {
 	struct tu102_mc *mc = tu102_mc(base);