Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c')
| -rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 1421 |
1 file changed, 1012 insertions, 409 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 99689f4de502..3ea447f6a45b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,17 +26,19 @@
 #include "fuc/os.h"
 
 #include <core/client.h>
-#include <core/option.h>
 #include <core/firmware.h>
-#include <subdev/secboot.h>
+#include <core/option.h>
+#include <subdev/acr.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/pmu.h>
+#include <subdev/therm.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
 #include <nvif/class.h>
 #include <nvif/cl9097.h>
+#include <nvif/if900d.h>
 #include <nvif/unpack.h>
 
 /*******************************************************************************
@@ -65,7 +67,7 @@ gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
-	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+	for (i = ltc->zbc_color_min; i <= ltc->zbc_color_max; i++) {
 		if (gr->zbc_color[i].format) {
 			if (gr->zbc_color[i].format != format)
 				continue;
@@ -90,7 +92,7 @@ gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
 	gr->zbc_color[zbc].format = format;
 	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_color(gr, zbc);
+	gr->func->zbc->clear_color(gr, zbc);
 	return zbc;
 }
 
@@ -112,7 +114,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
-	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+	for (i = ltc->zbc_depth_min; i <= ltc->zbc_depth_max; i++) {
 		if (gr->zbc_depth[i].format) {
 			if (gr->zbc_depth[i].format != format)
 				continue;
@@ -135,10 +137,16 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 	gr->zbc_depth[zbc].ds = ds;
 	gr->zbc_depth[zbc].l2 = l2;
 	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_depth(gr, zbc);
+	gr->func->zbc->clear_depth(gr, zbc);
 	return zbc;
 }
 
+const struct gf100_gr_func_zbc
+gf100_gr_zbc = {
+	.clear_color = gf100_gr_zbc_clear_color,
+	.clear_depth = gf100_gr_zbc_clear_depth,
+};
+
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -327,13 +335,13 @@ gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
 
 	if (!gr->firmware) {
 		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
-		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
 	} else {
 		nvkm_wo32(*pgpuobj, 0xf4, 0);
 		nvkm_wo32(*pgpuobj, 0xf8, 0);
 		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
-		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
+		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
 		nvkm_wo32(*pgpuobj, 0x1c, 1);
 		nvkm_wo32(*pgpuobj, 0x20, 0);
 		nvkm_wo32(*pgpuobj, 0x28, 0);
@@ -347,21 +355,15 @@ static void *
 gf100_gr_chan_dtor(struct nvkm_object *object)
 {
 	struct gf100_gr_chan *chan = gf100_gr_chan(object);
-	int i;
 
-	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-		if (chan->data[i].vma.node) {
-			nvkm_vm_unmap(&chan->data[i].vma);
-			nvkm_vm_put(&chan->data[i].vma);
-		}
-		nvkm_memory_del(&chan->data[i].mem);
-	}
+	nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
+	nvkm_memory_unref(&chan->mmio);
 
-	if (chan->mmio_vma.node) {
-		nvkm_vm_unmap(&chan->mmio_vma);
-		nvkm_vm_put(&chan->mmio_vma);
-	}
-	nvkm_memory_del(&chan->mmio);
+	nvkm_vmm_put(chan->vmm, &chan->attrib_cb);
+	nvkm_vmm_put(chan->vmm, &chan->unknown);
+	nvkm_vmm_put(chan->vmm, &chan->bundle_cb);
+	nvkm_vmm_put(chan->vmm, &chan->pagepool);
+	nvkm_vmm_unref(&chan->vmm);
 	return chan;
 }
 
@@ -372,72 +374,109 @@ gf100_gr_chan = {
 };
 
 static int
-gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
 		  const struct nvkm_oclass *oclass,
 		  struct nvkm_object **pobject)
 {
 	struct gf100_gr *gr = gf100_gr(base);
-	struct gf100_gr_data *data = gr->mmio_data;
-	struct gf100_gr_mmio *mmio = gr->mmio_list;
 	struct gf100_gr_chan *chan;
+	struct gf100_vmm_map_v0 args = { .priv = 1 };
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int ret, i;
+	int ret;
 
 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
 	chan->gr = gr;
+	chan->vmm = nvkm_vmm_ref(fifoch->vmm);
 	*pobject = &chan->object;
 
-	/* allocate memory for a "mmio list" buffer that's used by the HUB
-	 * fuc to modify some per-context register settings on first load
-	 * of the context.
-	 */
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
-			      false, &chan->mmio);
+	/* Map pagepool. */
+	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->pagepool), &chan->pagepool);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
-			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
+	ret = nvkm_memory_map(gr->pagepool, 0, chan->vmm, chan->pagepool, &args, sizeof(args));
 	if (ret)
 		return ret;
 
-	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+	/* Map bundle circular buffer. */
+	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->bundle_cb), &chan->bundle_cb);
+	if (ret)
+		return ret;
 
-	/* allocate buffers referenced by mmio list */
-	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
-		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-				      data->size, data->align, false,
-				      &chan->data[i].mem);
+	ret = nvkm_memory_map(gr->bundle_cb, 0, chan->vmm, chan->bundle_cb, &args, sizeof(args));
+	if (ret)
+		return ret;
+
+	/* Map attribute circular buffer. */
+	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->attrib_cb), &chan->attrib_cb);
+	if (ret)
+		return ret;
+
+	if (device->card_type < GP100) {
+		ret = nvkm_memory_map(gr->attrib_cb, 0, chan->vmm, chan->attrib_cb, NULL, 0);
 		if (ret)
 			return ret;
+	} else {
+		ret = nvkm_memory_map(gr->attrib_cb, 0, chan->vmm, chan->attrib_cb,
+				      &args, sizeof(args));
+		if (ret)
+			return ret;
+	}
 
-		ret = nvkm_vm_get(fifoch->vm,
-				  nvkm_memory_size(chan->data[i].mem), 12,
-				  data->access, &chan->data[i].vma);
+	/* Map some context buffer of unknown purpose. */
+	if (gr->func->grctx->unknown_size) {
+		ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(gr->unknown), &chan->unknown);
 		if (ret)
 			return ret;
 
-		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
-		data++;
+		ret = nvkm_memory_map(gr->unknown, 0, chan->vmm, chan->unknown,
+				      &args, sizeof(args));
+		if (ret)
+			return ret;
 	}
 
-	/* finally, fill in the mmio list and point the context at it */
-	nvkm_kmap(chan->mmio);
-	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
-		u32 addr = mmio->addr;
-		u32 data = mmio->data;
-
-		if (mmio->buffer >= 0) {
-			u64 info = chan->data[mmio->buffer].vma.offset;
-			data |= info >> mmio->shift;
+	/* Generate golden context image. */
+	mutex_lock(&gr->fecs.mutex);
+	if (gr->data == NULL) {
+		ret = gf100_grctx_generate(gr, chan, fifoch->inst);
+		if (ret) {
+			nvkm_error(&base->engine.subdev, "failed to construct context\n");
+			mutex_unlock(&gr->fecs.mutex);
+			return ret;
 		}
-
-		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
-		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
-		mmio++;
 	}
+	mutex_unlock(&gr->fecs.mutex);
+
+	/* allocate memory for a "mmio list" buffer that's used by the HUB
+	 * fuc to modify some per-context register settings on first load
+	 * of the context.
+	 */
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+			      false, &chan->mmio);
+	if (ret)
+		return ret;
+
+	ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
+			      chan->mmio_vma, &args, sizeof(args));
+	if (ret)
+		return ret;
+
+	/* finally, fill in the mmio list and point the context at it */
+	nvkm_kmap(chan->mmio);
+	gr->func->grctx->pagepool(chan, chan->pagepool->addr);
+	gr->func->grctx->bundle(chan, chan->bundle_cb->addr, gr->func->grctx->bundle_size);
+	gr->func->grctx->attrib_cb(chan, chan->attrib_cb->addr, gr->func->grctx->attrib_cb_size(gr));
+	gr->func->grctx->attrib(chan);
+	if (gr->func->grctx->patch_ltc)
+		gr->func->grctx->patch_ltc(chan);
+	if (gr->func->grctx->unknown_size)
+		gr->func->grctx->unknown(chan, chan->unknown->addr, gr->func->grctx->unknown_size);
 	nvkm_done(chan->mmio);
 	return 0;
 }
@@ -702,6 +741,244 @@ gf100_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static u32
+gf100_gr_ctxsw_inst(struct nvkm_gr *gr)
+{
+	return nvkm_rd32(gr->engine.subdev.device, 0x409b00);
+}
+
+static int
+gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409804, 0xffffffff);
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0xffffffff);
+	nvkm_wr32(device, 0x409504, mthd);
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x409804);
+		if (stat == 0x00000002)
+			return -EIO;
+		if (stat == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int ret = 0;
+
+	mutex_lock(&gr->fecs.mutex);
+	if (!--gr->fecs.disable) {
+		if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x39)))
+			gr->fecs.disable++;
+	}
+	mutex_unlock(&gr->fecs.mutex);
+	return ret;
+}
+
+static int
+gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int ret = 0;
+
+	mutex_lock(&gr->fecs.mutex);
+	if (!gr->fecs.disable++) {
+		if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x38)))
+			gr->fecs.disable--;
+	}
+	mutex_unlock(&gr->fecs.mutex);
+	return ret;
+}
+
+static int
+gf100_gr_fecs_halt_pipeline(struct gf100_gr *gr)
+{
+	int ret = 0;
+
+	if (gr->firmware) {
+		mutex_lock(&gr->fecs.mutex);
+		ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x04);
+		mutex_unlock(&gr->fecs.mutex);
+	}
+
+	return ret;
+}
+
+int
+gf100_gr_fecs_wfi_golden_save(struct gf100_gr *gr, u32 inst)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_mask(device, 0x409800, 0x00000003, 0x00000000);
+	nvkm_wr32(device, 0x409500, inst);
+	nvkm_wr32(device, 0x409504, 0x00000009);
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x409800);
+		if (stat & 0x00000002)
+			return -EIO;
+		if (stat & 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+int
+gf100_gr_fecs_bind_pointer(struct gf100_gr *gr, u32 inst)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_mask(device, 0x409800, 0x00000030, 0x00000000);
+	nvkm_wr32(device, 0x409500, inst);
+	nvkm_wr32(device, 0x409504, 0x00000003);
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x409800);
+		if (stat & 0x00000020)
+			return -EIO;
+		if (stat & 0x00000010)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_virtual_address(struct gf100_gr *gr, u64 addr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409810, addr >> 8);
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000032);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_bind_instance(struct gf100_gr *gr, u32 inst)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409810, inst);
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000031);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_reglist_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000030);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_elpg_bind(struct gf100_gr *gr)
+{
+	u32 size;
+	int ret;
+
+	ret = gf100_gr_fecs_discover_reglist_image_size(gr, &size);
+	if (ret)
+		return ret;
+
+	/*XXX: We need to allocate + map the above into PMU's inst block,
+	 *     which means we probably need a proper PMU before we
+	 *     even bother.
+	 */
+
+	ret = gf100_gr_fecs_set_reglist_bind_instance(gr, 0);
+	if (ret)
+		return ret;
+
+	return gf100_gr_fecs_set_reglist_virtual_address(gr, 0);
+}
+
+static int
+gf100_gr_fecs_discover_pm_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000025);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_zcull_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000016);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000010);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static void
+gf100_gr_fecs_set_watchdog_timeout(struct gf100_gr *gr, u32 timeout)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, timeout);
+	nvkm_wr32(device, 0x409504, 0x00000021);
+}
+
 static bool
 gf100_gr_chsw_load(struct nvkm_gr *base)
 {
@@ -737,24 +1014,34 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
 	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
-	int index;
+	int index, c = ltc->zbc_color_min, d = ltc->zbc_depth_min, s = ltc->zbc_depth_min;
 
 	if (!gr->zbc_color[0].format) {
-		gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]);
-		gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]);
-		gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]);
-		gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]);
-		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
-		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
+		gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); c++;
+		gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); c++;
+		gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); c++;
+		gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); c++;
+		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++;
+		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++;
+		if (gr->func->zbc->stencil_get) {
+			gr->func->zbc->stencil_get(gr, 1, 0x00, 0x00); s++;
+			gr->func->zbc->stencil_get(gr, 1, 0x01, 0x01); s++;
+			gr->func->zbc->stencil_get(gr, 1, 0xff, 0xff); s++;
+		}
 	}
 
-	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_color(gr, index);
-	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_depth(gr, index);
+	for (index = c; index <= ltc->zbc_color_max; index++)
+		gr->func->zbc->clear_color(gr, index);
+	for (index = d; index <= ltc->zbc_depth_max; index++)
+		gr->func->zbc->clear_depth(gr, index);
+
+	if (gr->func->zbc->clear_stencil) {
+		for (index = s; index <= ltc->zbc_depth_max; index++)
+			gr->func->zbc->clear_stencil(gr, index);
+	}
 }
 
-/**
+/*
  * Wait until GR goes idle. GR is considered idle if it is disabled by the
  * MC (0x200) register, or GR is not busy and a context switch is not in
  * progress.
@@ -775,7 +1062,7 @@ gf100_gr_wait_idle(struct gf100_gr *gr)
 		nvkm_rd32(device, 0x400700);
 
 		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
-		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
+		ctxsw_active = nvkm_fifo_ctxsw_in_progress(&gr->base.engine);
 		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active))
@@ -811,7 +1098,7 @@ gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
-	u32 data = 0;
+	u64 data = 0;
 
 	nvkm_wr32(device, 0x400208, 0x80000000);
 
@@ -821,6 +1108,8 @@ gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 
 			if ((pack == p && init == p->init) || data != init->data) {
 				nvkm_wr32(device, 0x400204, init->data);
+				if (pack->type == 64)
+					nvkm_wr32(device, 0x40020c, upper_32_bits(init->data));
 				data = init->data;
 			}
 
@@ -964,7 +1253,7 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 }
 
-static const struct nvkm_enum gf100_mp_warp_error[] = {
+const struct nvkm_enum gf100_mp_warp_error[] = {
 	{ 0x01, "STACK_ERROR" },
 	{ 0x02, "API_STACK_ERROR" },
 	{ 0x03, "RET_EMPTY_STACK_ERROR" },
@@ -989,7 +1278,7 @@ static const struct nvkm_enum gf100_mp_warp_error[] = {
 	{}
 };
 
-static const struct nvkm_bitfield gf100_mp_global_error[] = {
+const struct nvkm_bitfield gf100_mp_global_error[] = {
 	{ 0x00000001, "SM_TO_SM_FAULT" },
 	{ 0x00000002, "L1_ERROR" },
 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
@@ -1003,7 +1292,7 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
 	{}
 };
 
-static void
+void
 gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
@@ -1039,7 +1328,7 @@ gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
 	}
 
 	if (stat & 0x00000002) {
-		gf100_gr_trap_mp(gr, gpc, tpc);
+		gr->func->trap_mp(gr, gpc, tpc);
 		stat &= ~0x00000002;
 	}
 
@@ -1314,13 +1603,13 @@ gf100_gr_ctxctl_isr(struct gf100_gr *gr)
 	}
 }
 
-static void
-gf100_gr_intr(struct nvkm_gr *base)
+static irqreturn_t
+gf100_gr_intr(struct nvkm_inth *inth)
 {
-	struct gf100_gr *gr = gf100_gr(base);
+	struct gf100_gr *gr = container_of(inth, typeof(*gr), base.engine.subdev.inth);
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_fifo_chan *chan;
+	struct nvkm_chan *chan;
 	unsigned long flags;
 	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
 	u32 stat = nvkm_rd32(device, 0x400100);
@@ -1333,10 +1622,10 @@ gf100_gr_intr(struct nvkm_gr *base)
 	const char *name = "unknown";
 	int chid = -1;
 
-	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	chan = nvkm_chan_get_inst(&gr->base.engine, (u64)inst << 12, &flags);
 	if (chan) {
-		name = chan->object.client->name;
-		chid = chan->chid;
+		name = chan->name;
+		chid = chan->id;
 	}
 
 	if (device->card_type < NV_E0 || subc < 4)
@@ -1403,12 +1692,13 @@ gf100_gr_intr(struct nvkm_gr *base)
 	}
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
-	nvkm_fifo_chan_put(device->fifo, flags, &chan);
+	nvkm_chan_put(&chan, flags);
+	return IRQ_HANDLED;
 }
 
 static void
 gf100_gr_init_fw(struct nvkm_falcon *falcon,
-		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
+		 struct nvkm_blob *code, struct nvkm_blob *data)
 {
 	nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
 	nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
@@ -1462,25 +1752,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 {
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_secboot *sb = device->secboot;
-	u32 secboot_mask = 0;
+	u32 lsf_mask = 0;
+	int ret;
 
 	/* load fuc microcode */
 	nvkm_mc_unk260(device, 0);
 
 	/* securely-managed falcons must be reset using secure boot */
-	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
-	else
-		gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
-	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
-	else
-		gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
+	if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
+		gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst,
+				 &gr->fecs.data);
+	} else {
+		lsf_mask |= BIT(NVKM_ACR_LSF_FECS);
+	}
+
+	if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) {
+		gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst,
+				 &gr->gpccs.data);
+	} else {
+		lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS);
+	}
 
-	if (secboot_mask != 0) {
-		int ret = nvkm_secboot_reset(sb, secboot_mask);
+	if (lsf_mask) {
+		ret = nvkm_acr_bootstrap_falcons(device, lsf_mask);
 		if (ret)
 			return ret;
 	}
@@ -1488,12 +1783,12 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	nvkm_mc_unk260(device, 1);
 
 	/* start both of them running */
-	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409800, 0x00000000);
 	nvkm_wr32(device, 0x41a10c, 0x00000000);
 	nvkm_wr32(device, 0x40910c, 0x00000000);
 
-	nvkm_falcon_start(gr->gpccs);
-	nvkm_falcon_start(gr->fecs);
+	nvkm_falcon_start(&gr->gpccs.falcon);
+	nvkm_falcon_start(&gr->fecs.falcon);
 
 	if (nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1501,78 +1796,33 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	) < 0)
 		return -EBUSY;
 
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x7fffffff);
-	nvkm_wr32(device, 0x409504, 0x00000021);
-
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000010);
-	if (nvkm_msec(device, 2000,
-		if ((gr->size = nvkm_rd32(device, 0x409800)))
-			break;
-	) < 0)
-		return -EBUSY;
-
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000016);
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x409800))
-			break;
-	) < 0)
-		return -EBUSY;
+	gf100_gr_fecs_set_watchdog_timeout(gr, 0x7fffffff);
 
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000025);
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x409800))
-			break;
-	) < 0)
-		return -EBUSY;
+	/* Determine how much memory is required to store main context image. */
+	ret = gf100_gr_fecs_discover_image_size(gr, &gr->size);
+	if (ret)
+		return ret;
 
-	if (device->chipset >= 0xe0) {
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000030);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
-
-		nvkm_wr32(device, 0x409810, 0xb00095c8);
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000031);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
-
-		nvkm_wr32(device, 0x409810, 0x00080420);
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000032);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
+	/* Determine how much memory is required to store ZCULL image. */
+	ret = gf100_gr_fecs_discover_zcull_image_size(gr, &gr->size_zcull);
+	if (ret)
+		return ret;
 
-		nvkm_wr32(device, 0x409614, 0x00000070);
-		nvkm_wr32(device, 0x409614, 0x00000770);
-		nvkm_wr32(device, 0x40802c, 0x00000001);
-	}
+	/* Determine how much memory is required to store PerfMon image. */
+	ret = gf100_gr_fecs_discover_pm_image_size(gr, &gr->size_pm);
+	if (ret)
+		return ret;
 
-	if (gr->data == NULL) {
-		int ret = gf100_grctx_generate(gr);
-		if (ret) {
-			nvkm_error(subdev, "failed to construct context\n");
+	/*XXX: We (likely) require PMU support to even bother with this.
+	 *
+	 *     Also, it seems like not all GPUs support ELPG.  Traces I
+	 *     have here show RM enabling it on Kepler/Turing, but none
+	 *     of the GPUs between those.  NVGPU decides this by PCIID.
+	 */
+	if (0) {
+		ret = gf100_gr_fecs_elpg_bind(gr);
+		if (ret)
 			return ret;
-		}
 	}
 
 	return 0;
@@ -1591,21 +1841,26 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
 
 	/* load HUB microcode */
 	nvkm_mc_unk260(device, 0);
-	nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
+	nvkm_falcon_load_dmem(&gr->fecs.falcon,
+			      gr->func->fecs.ucode->data.data, 0x0,
 			      gr->func->fecs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
+	nvkm_falcon_load_imem(&gr->fecs.falcon,
+			      gr->func->fecs.ucode->code.data, 0x0,
 			      gr->func->fecs.ucode->code.size, 0, 0, false);
 
 	/* load GPC microcode */
-	nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
+	nvkm_falcon_load_dmem(&gr->gpccs.falcon,
+			      gr->func->gpccs.ucode->data.data, 0x0,
 			      gr->func->gpccs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
+	nvkm_falcon_load_imem(&gr->gpccs.falcon,
+			      gr->func->gpccs.ucode->code.data, 0x0,
 			      gr->func->gpccs.ucode->code.size, 0, 0, false);
 	nvkm_mc_unk260(device, 1);
 
 	/* load register lists */
 	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
-	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->gpc_0, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->gpc_1, 0x41a000, 0x000, 0x418000);
 	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
 	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
 
@@ -1621,14 +1876,6 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
 	}
 
 	gr->size = nvkm_rd32(device, 0x409804);
-	if (gr->data == NULL) {
-		int ret = gf100_grctx_generate(gr);
-		if (ret) {
-			nvkm_error(subdev, "failed to construct context\n");
-			return ret;
-		}
-	}
-
 	return 0;
 }
 
@@ -1645,20 +1892,115 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	return ret;
 }
 
+int
+gf100_gr_oneinit_sm_id(struct gf100_gr *gr)
+{
+	int tpc, gpc;
+
+	for (tpc = 0; tpc < gr->tpc_max; tpc++) {
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				gr->sm[gr->sm_nr].gpc = gpc;
+				gr->sm[gr->sm_nr].tpc = tpc;
+				gr->sm_nr++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void
+gf100_gr_oneinit_tiles(struct gf100_gr *gr)
+{
+	static const u8 primes[] = {
+		3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61
+	};
+	int init_frac[GPC_MAX], init_err[GPC_MAX], run_err[GPC_MAX], i, j;
+	u32 mul_factor, comm_denom;
+	u8 gpc_map[GPC_MAX];
+	bool sorted;
+
+	switch (gr->tpc_total) {
+	case 15: gr->screen_tile_row_offset = 0x06; break;
+	case 14: gr->screen_tile_row_offset = 0x05; break;
+	case 13: gr->screen_tile_row_offset = 0x02; break;
+	case 11: gr->screen_tile_row_offset = 0x07; break;
+	case 10: gr->screen_tile_row_offset = 0x06; break;
+	case  7:
+	case  5: gr->screen_tile_row_offset = 0x01; break;
+	case  3: gr->screen_tile_row_offset = 0x02; break;
+	case  2:
+	case  1: gr->screen_tile_row_offset = 0x01; break;
+	default: gr->screen_tile_row_offset = 0x03;
+		for (i = 0; i < ARRAY_SIZE(primes); i++) {
+			if (gr->tpc_total % primes[i]) {
+				gr->screen_tile_row_offset = primes[i];
+				break;
+			}
+		}
+		break;
+	}
+
+	/* Sort GPCs by TPC count, highest-to-lowest. */
+	for (i = 0; i < gr->gpc_nr; i++)
+		gpc_map[i] = i;
+	sorted = false;
+
+	while (!sorted) {
+		for (sorted = true, i = 0; i < gr->gpc_nr - 1; i++) {
+			if (gr->tpc_nr[gpc_map[i + 1]] >
+			    gr->tpc_nr[gpc_map[i + 0]]) {
+				u8 swap = gpc_map[i];
+				gpc_map[i + 0] = gpc_map[i + 1];
+				gpc_map[i + 1] = swap;
+				sorted = false;
+			}
+		}
+	}
+
+	/* Determine tile->GPC mapping */
+	mul_factor = gr->gpc_nr * gr->tpc_max;
+	if (mul_factor & 1)
+		mul_factor = 2;
+	else
+		mul_factor = 1;
+
+	comm_denom = gr->gpc_nr * gr->tpc_max * mul_factor;
+
+	for (i = 0; i < gr->gpc_nr; i++) {
+		init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor;
+		init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
+		run_err[i] = init_frac[i] + init_err[i];
+	}
+
+	for (i = 0; i < gr->tpc_total;) {
+		for (j = 0; j < gr->gpc_nr; j++) {
+			if ((run_err[j] * 2) >= comm_denom) {
+				gr->tile[i++] = gpc_map[j];
+				run_err[j] += init_frac[j] - comm_denom;
+			} else {
+				run_err[j] += init_frac[j];
+			}
+		}
+	}
+}
+
 static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int i, j;
-	int ret;
+	struct nvkm_intr *intr = &device->mc->intr;
+	enum nvkm_intr_type intr_type = NVKM_INTR_SUBDEV;
+	int ret, i, j;
 
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
-	if (ret)
-		return ret;
+	if (gr->func->oneinit_intr)
+		intr = gr->func->oneinit_intr(gr, &intr_type);
 
-	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	ret = nvkm_inth_add(intr, intr_type, NVKM_INTR_PRIO_NORMAL, &gr->base.engine.subdev,
+			    gf100_gr_intr, &gr->base.engine.subdev.inth);
 	if (ret)
 		return ret;
 
@@ -1668,56 +2010,55 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
 	for (i = 0; i < gr->gpc_nr; i++) {
 		gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+		gr->tpc_max = max(gr->tpc_max, gr->tpc_nr[i]);
 		gr->tpc_total += gr->tpc_nr[i];
-		gr->ppc_nr[i] = gr->func->ppc_nr;
-		for (j = 0; j < gr->ppc_nr[i]; j++) {
-			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
-			if (mask)
-				gr->ppc_mask[i] |= (1 << j);
-			gr->ppc_tpc_nr[i][j] = hweight8(mask);
+		for (j = 0; j < gr->func->ppc_nr; j++) {
+			gr->ppc_tpc_mask[i][j] =
+				nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			if (gr->ppc_tpc_mask[i][j] == 0)
+				continue;
+
+			gr->ppc_nr[i]++;
+
+			gr->ppc_mask[i] |= (1 << j);
+			gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
+			if (gr->ppc_tpc_min == 0 ||
+			    gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
+				gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
+			if (gr->ppc_tpc_max < gr->ppc_tpc_nr[i][j])
+				gr->ppc_tpc_max = gr->ppc_tpc_nr[i][j];
 		}
+
+		gr->ppc_total += gr->ppc_nr[i];
 	}
 
-	/*XXX: these need figuring out... though it might not even matter */
-	switch (device->chipset) {
-	case 0xc0:
-		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			gr->screen_tile_row_offset = 0x07;
-		} else
-		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			gr->screen_tile_row_offset = 0x05;
-		} else
-		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			gr->screen_tile_row_offset = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		gr->screen_tile_row_offset = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		gr->screen_tile_row_offset = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		gr->screen_tile_row_offset = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xd7:
-	case 0xd9: /* 1/0/0/0, 1 */
-	case 0xea: /* gk20a */
-	case 0x12b: /* gm20b */
-		gr->screen_tile_row_offset = 0x01;
-		break;
+	/* Allocate global context buffers. */
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
+			      gr->func->grctx->pagepool_size, 0x100, false, &gr->pagepool);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST, gr->func->grctx->bundle_size,
+			      0x100, false, &gr->bundle_cb);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
+			      gr->func->grctx->attrib_cb_size(gr), 0x1000, false, &gr->attrib_cb);
+	if (ret)
+		return ret;
+
+	if (gr->func->grctx->unknown_size) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->unknown_size,
+				      0x100, false, &gr->unknown);
+		if (ret)
+			return ret;
 	}
 
-	return 0;
+	memset(gr->tile, 0xff, sizeof(gr->tile));
+	gr->func->oneinit_tiles(gr);
+
+	return gr->func->oneinit_sm_id(gr);
 }
 
 static int
@@ -1725,223 +2066,241 @@ gf100_gr_init_(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_subdev *subdev = &base->engine.subdev;
-	u32 ret;
+	struct nvkm_device *device = subdev->device;
+	bool reset = device->chipset == 0x137 || device->chipset == 0x138;
+	int ret;
+
+	/* On certain GP107/GP108 boards, we trigger a weird issue where
+	 * GR will stop responding to PRI accesses after we've asked the
+	 * SEC2 RTOS to boot the GR falcons.  This happens with far more
+	 * frequency when cold-booting a board (ie. returning from D3).
+	 *
+	 * The root cause for this is not known and has proven difficult
+	 * to isolate, with many avenues being dead-ends.
	 *
+	 * A workaround was discovered by Karol, whereby putting GR into
+	 * reset for an extended period right before initialisation
+	 * prevents the problem from occurring.
+	 *
+	 * XXX: As RM does not require any such workaround, this is more
+	 *      of a hack than a true fix.
+	 */
+	reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
+	if (reset) {
+		nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+		nvkm_rd32(device, 0x000200);
+		msleep(50);
+		nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+		nvkm_rd32(device, 0x000200);
+	}
 
 	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
 
-	ret = nvkm_falcon_get(gr->fecs, subdev);
+	ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
 	if (ret)
 		return ret;
 
-	ret = nvkm_falcon_get(gr->gpccs, subdev);
+	ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev);
 	if (ret)
 		return ret;
 
-	return gr->func->init(gr);
+	ret = gr->func->init(gr);
	if (ret)
+		return ret;
+
+	nvkm_inth_allow(&subdev->inth);
+	return 0;
 }
 
 static int
-gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+gf100_gr_fini(struct nvkm_gr *base, bool suspend)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	nvkm_falcon_put(gr->gpccs, subdev);
-	nvkm_falcon_put(gr->fecs, subdev);
-	return 0;
-}
 
-void
-gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
-{
-	kfree(fuc->data);
-	fuc->data = NULL;
-}
+	nvkm_inth_block(&subdev->inth);
 
-static void
-gf100_gr_dtor_init(struct gf100_gr_pack *pack)
-{
-	vfree(pack);
+	nvkm_falcon_put(&gr->gpccs.falcon, subdev);
+	nvkm_falcon_put(&gr->fecs.falcon, subdev);
+	return 0;
 }
 
-void *
+static void *
 gf100_gr_dtor(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 
-	if (gr->func->dtor)
-		gr->func->dtor(gr);
 	kfree(gr->data);
 
-	nvkm_falcon_del(&gr->gpccs);
-	nvkm_falcon_del(&gr->fecs);
-
-	gf100_gr_dtor_fw(&gr->fuc409c);
-	gf100_gr_dtor_fw(&gr->fuc409d);
-	gf100_gr_dtor_fw(&gr->fuc41ac);
-	gf100_gr_dtor_fw(&gr->fuc41ad);
-
-	gf100_gr_dtor_init(gr->fuc_bundle);
-	gf100_gr_dtor_init(gr->fuc_method);
-	gf100_gr_dtor_init(gr->fuc_sw_ctx);
-	gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+	nvkm_memory_unref(&gr->unknown);
+	nvkm_memory_unref(&gr->attrib_cb);
+	nvkm_memory_unref(&gr->bundle_cb);
+	nvkm_memory_unref(&gr->pagepool);
+
+	nvkm_falcon_dtor(&gr->gpccs.falcon);
+	nvkm_falcon_dtor(&gr->fecs.falcon);
+
+	nvkm_blob_dtor(&gr->fecs.inst);
+	nvkm_blob_dtor(&gr->fecs.data);
+	nvkm_blob_dtor(&gr->gpccs.inst);
+	nvkm_blob_dtor(&gr->gpccs.data);
+
+	vfree(gr->bundle64);
+	vfree(gr->bundle_veid);
+	vfree(gr->bundle);
+	vfree(gr->method);
+	vfree(gr->sw_ctx);
+	vfree(gr->sw_nonctx);
+	vfree(gr->sw_nonctx1);
+	vfree(gr->sw_nonctx2);
+	vfree(gr->sw_nonctx3);
+	vfree(gr->sw_nonctx4);
 	return gr;
 }
 
-static const struct nvkm_gr_func
-gf100_gr_ = {
-	.dtor = gf100_gr_dtor,
-	.oneinit = gf100_gr_oneinit,
-	.init = gf100_gr_init_,
-	.fini = gf100_gr_fini_,
-	.intr = gf100_gr_intr,
-	.units = gf100_gr_units,
-	.chan_new = gf100_gr_chan_new,
-	.object_get = gf100_gr_object_get,
-	.chsw_load = gf100_gr_chsw_load,
+static const struct nvkm_falcon_func
+gf100_gr_flcn = {
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.start = nvkm_falcon_v1_start,
 };
 
-int
-gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
-			struct gf100_gr_fuc *fuc, int ret)
+void
+gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	const struct firmware *fw;
-	char f[32];
+	struct nvkm_device *device = gr->base.engine.subdev.device;
	int gpc, i, j;
+	u32 data;
 
-	/* see if this firmware has a legacy path */
-	if (!strcmp(fwname, "fecs_inst"))
-		fwname = "fuc409c";
-	else if (!strcmp(fwname, "fecs_data"))
-		fwname = "fuc409d";
-	else if (!strcmp(fwname, "gpccs_inst"))
-		fwname = "fuc41ac";
-	else if (!strcmp(fwname, "gpccs_data"))
-		fwname = "fuc41ad";
-	else {
-		/* nope, let's just return the error we got */
-		nvkm_error(subdev, "failed to load %s\n", fwname);
-		return ret;
+	for (gpc = 0, i = 0; i < 4; i++) {
+		for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+			data |= gr->tpc_nr[gpc] << (j * 4);
+		if (pd)
+			nvkm_wr32(device, 0x406028 + (i * 4), data);
+		if (ds)
+			nvkm_wr32(device, 0x405870 + (i * 4), data);
 	}
+}
 
-	/* yes, try to load from the legacy path */
-	nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
+void
+gf100_gr_init_400054(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
+}
 
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-	ret = request_firmware(&fw, f, device->dev);
-	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, device->dev);
-		if (ret) {
-			nvkm_error(subdev, "failed to load %s\n", fwname);
-			return ret;
-		}
-	}
+void
+gf100_gr_init_exception2(struct gf100_gr *gr)
+{
	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	release_firmware(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 }
 
-int
-gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
-		 struct gf100_gr_fuc *fuc)
+void
+gf100_gr_init_rop_exceptions(struct gf100_gr *gr)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	const struct firmware *fw;
-	int ret;
-
-	ret = nvkm_firmware_get(device, fwname, &fw);
-	if (ret)
-		return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int rop;
 
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	nvkm_firmware_put(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
 }
 
-int
-gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct gf100_gr *gr)
+void
+gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
 {
-	gr->func = func;
-	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
-				    func->fecs.ucode == NULL);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+}
 
-	return nvkm_gr_ctor(&gf100_gr_, device, index,
-			    gr->firmware || func->fecs.ucode != NULL,
-			    &gr->base);
+void
+gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
 }
 
-int
-gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_gr **pgr)
+void
+gf100_gr_init_419eb4(struct gf100_gr *gr)
 {
-	struct gf100_gr *gr;
-	int ret;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+}
 
-	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
-		return -ENOMEM;
-	*pgr = &gr->base;
+void
+gf100_gr_init_419cc0(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, tpc;
 
-	ret = gf100_gr_ctor(func, device, index, gr);
-	if (ret)
-		return ret;
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
-	if (gr->firmware) {
-		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
-		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
-			return -ENODEV;
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++)
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 	}
+}
 
-	return 0;
+void
+gf100_gr_init_40601c(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x40601c, 0xc0000000);
 }
 
-int
-gf100_gr_init(struct gf100_gr *gr)
+void
+gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+	const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001;
+	nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data);
+}
+
+void
+gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nvkm_fb *fb = device->fb;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, rop;
-	int i;
 
-	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
-
-	gf100_gr_mmio(gr, gr->func->mmio);
-
-	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
+	nvkm_wr32(device, 0x4188a4, 0x03000000);
+	nvkm_wr32(device, 0x418888, 0x00000000);
+	nvkm_wr32(device, 0x41888c, 0x00000000);
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
+}
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+void
+gf100_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+}
 
-		data[i / 8] |= tpc << ((i % 8) * 4);
+void
+gf100_gr_init_zcull(struct gf100_gr *gr)
+{
	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	const u8 tile_nr = ALIGN(gr->tpc_total, 32);
+	u8 bank[GPC_MAX] = {}, gpc, i, j;
+	u32 data;
+
+	for (i = 0; i < tile_nr; i += 8) {
+		for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+			data |= bank[gr->tile[i + j]] << (j * 4);
+			bank[gr->tile[i + j]]++;
+		}
+		nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
 	}
 
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
 			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
@@ -1950,29 +2309,120 @@ gf100_gr_init(struct gf100_gr *gr)
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	if (device->chipset != 0xd7)
-		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
-	else
-		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+}
 
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+void
+gf100_gr_init_vsc_stream_master(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+}
+
+static int
+gf100_gr_reset(struct nvkm_gr *base)
+{
+	struct nvkm_subdev *subdev = &base->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct gf100_gr *gr = gf100_gr(base);
+
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
+
+	WARN_ON(gf100_gr_fecs_halt_pipeline(gr));
+
+	subdev->func->fini(subdev, false);
+	nvkm_mc_disable(device, subdev->type, subdev->inst);
+	if (gr->func->gpccs.reset)
+		gr->func->gpccs.reset(gr);
+
+	nvkm_mc_enable(device, subdev->type, subdev->inst);
+	return subdev->func->init(subdev);
+}
+
+int
+gf100_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, tpc;
+
+	nvkm_mask(device, 0x400500, 0x00010001, 0x00000000);
+
+	gr->func->init_gpc_mmu(gr);
+
+	if (gr->sw_nonctx1) {
+		gf100_gr_mmio(gr, gr->sw_nonctx1);
+		gf100_gr_mmio(gr, gr->sw_nonctx2);
+		gf100_gr_mmio(gr, gr->sw_nonctx3);
+		gf100_gr_mmio(gr, gr->sw_nonctx4);
+	} else
+	if (gr->sw_nonctx) {
+		gf100_gr_mmio(gr, gr->sw_nonctx);
+	} else {
+		gf100_gr_mmio(gr, gr->func->mmio);
+	}
+
+	gf100_gr_wait_idle(gr);
+
+	if (gr->func->init_r405a14)
+		gr->func->init_r405a14(gr);
+
+	if (gr->func->clkgate_pack)
+		nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack);
+
+	if (gr->func->init_bios)
+		gr->func->init_bios(gr);
+
+	gr->func->init_vsc_stream_master(gr);
+	gr->func->init_zcull(gr);
+	gr->func->init_num_active_ltcs(gr);
+	if (gr->func->init_rop_active_fbps)
+		gr->func->init_rop_active_fbps(gr);
+	if (gr->func->init_bios_2)
+		gr->func->init_bios_2(gr);
+	if (gr->func->init_swdx_pes_mask)
+		gr->func->init_swdx_pes_mask(gr);
+	if (gr->func->init_fs)
+		gr->func->init_fs(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+
+	gr->func->init_fecs_exceptions(gr);
+
+	if (gr->func->init_40a790)
+		gr->func->init_40a790(gr);
+
+	if (gr->func->init_ds_hww_esr_2)
+		gr->func->init_ds_hww_esr_2(gr);
 
-	nvkm_wr32(device, 0x409c24, 0x000f0000);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x40601c, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
+
+	if (gr->func->init_40601c)
+		gr->func->init_40601c(gr);
+
 	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+
+	if (gr->func->init_sked_hww_esr)
+		gr->func->init_sked_hww_esr(gr);
+
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
-	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+	if (gr->func->init_419cc0)
+		gr->func->init_419cc0(gr);
+	if (gr->func->init_419eb4)
+		gr->func->init_419eb4(gr);
+	if (gr->func->init_419c9c)
+		gr->func->init_419c9c(gr);
+
+	if (gr->func->init_ppc_exceptions)
+		gr->func->init_ppc_exceptions(gr);
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
@@ -1982,37 +2432,49 @@ gf100_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			if (gr->func->init_tex_hww_esr)
+				gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+			if (gr->func->init_504430)
+				gr->func->init_504430(gr, gpc, tpc);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
+	gr->func->init_rop_exceptions(gr);
 
 	nvkm_wr32(device, 0x400108, 0xffffffff);
 	nvkm_wr32(device, 0x400138, 0xffffffff);
 	nvkm_wr32(device, 0x400118, 0xffffffff);
 	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
+	if (gr->func->init_exception2)
+		gr->func->init_exception2(gr);
 
-	nvkm_wr32(device, 0x400054, 0x34ce3464);
+	if (gr->func->init_400054)
+		gr->func->init_400054(gr);
 
 	gf100_gr_zbc_init(gr);
 
+	if (gr->func->init_4188a4)
+		gr->func->init_4188a4(gr);
+
 	return gf100_gr_init_ctxctl(gr);
 }
 
+void
+gf100_gr_fecs_reset(struct gf100_gr *gr)
+{
	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409614, 0x00000070);
+	nvkm_usec(device, 10, NVKM_DELAY);
+	nvkm_mask(device, 0x409614, 0x00000700, 0x00000700);
+	nvkm_usec(device, 10, NVKM_DELAY);
+	nvkm_rd32(device, 0x409614);
+}
+
 #include "fuc/hubgf100.fuc3.h"
 
 struct gf100_gr_ucode
@@ -2033,14 +2495,60 @@ gf100_gr_gpccs_ucode = {
 	.data.size = sizeof(gf100_grgpc_data),
 };
 
+static int
+gf100_gr_nonstall(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+
+	if (gr->func->nonstall)
+		return gr->func->nonstall(gr);
+
+	return -EINVAL;
+}
+
+static const struct nvkm_gr_func
+gf100_gr_ = {
+	.dtor = gf100_gr_dtor,
+	.oneinit = gf100_gr_oneinit,
+	.init = gf100_gr_init_,
+	.fini = gf100_gr_fini,
+	.nonstall = gf100_gr_nonstall,
+	.reset = gf100_gr_reset,
+	.units = gf100_gr_units,
+	.chan_new = gf100_gr_chan_new,
+	.object_get = gf100_gr_object_get,
+	.chsw_load = gf100_gr_chsw_load,
+	.ctxsw.pause = gf100_gr_fecs_stop_ctxsw,
+	.ctxsw.resume = gf100_gr_fecs_start_ctxsw,
+	.ctxsw.inst = gf100_gr_ctxsw_inst,
+};
+
 static const struct gf100_gr_func
 gf100_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_rop_exceptions = gf100_gr_init_rop_exceptions,
+	.init_exception2 = gf100_gr_init_exception2,
+	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
+	.fecs.reset = gf100_gr_fecs_reset,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf100_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
@@ -2051,7 +2559,102 @@ gf100_gr = {
 };
 
 int
-gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	gr->firmware = false;
+	return 0;
+}
+
+static int
+gf100_gr_load_fw(struct gf100_gr *gr, const char *name,
+		 struct nvkm_blob *blob)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct firmware *fw;
+	char f[32];
+	int ret;
+
+	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name);
+	ret = request_firmware(&fw, f, device->dev);
+	if (ret) {
+		snprintf(f, sizeof(f), "nouveau/%s", name);
+		ret = request_firmware(&fw, f, device->dev);
+		if (ret) {
+			nvkm_error(subdev, "failed to load %s\n", name);
+			return ret;
+		}
+	}
+
+	blob->size = fw->size;
+	blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL);
+	release_firmware(fw);
+	return (blob->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
+gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false))
+		return -EINVAL;
+
+	if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) ||
+	    gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) ||
+	    gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) ||
+	    gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data))
+		return -ENOENT;
+
+	gr->firmware = true;
+	return 0;
+}
+
+static const struct gf100_gr_fwif
+gf100_gr_fwif[] = {
+	{ -1, gf100_gr_load, &gf100_gr },
+	{ -1, gf100_gr_nofw, &gf100_gr },
+	{}
+};
+
+int
+gf100_gr_new_(const struct gf100_gr_fwif *fwif, struct nvkm_device *device,
+	      enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = nvkm_gr_ctor(&gf100_gr_, device, type, inst, true, &gr->base);
+	if (ret)
+		return ret;
+
+	fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	gr->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+			       "fecs", 0x409000, &gr->fecs.falcon);
+	if (ret)
+		return ret;
+
+	mutex_init(&gr->fecs.mutex);
+
+	ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+			       "gpccs", 0x41a000, &gr->gpccs.falcon);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int
+gf100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
-	return gf100_gr_new_(&gf100_gr, device, index, pgr);
+	return gf100_gr_new_(gf100_gr_fwif, device, type, inst, pgr);
 }
