Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
200 files changed, 11762 insertions, 3687 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 2e48b0816670..ddcf8782d6b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild
 include $(src)/nvkm/engine/msvld/Kbuild
 include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
-include $(src)/nvkm/engine/nvjpg/Kbuild
-include $(src)/nvkm/engine/ofa/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
 include $(src)/nvkm/engine/sec2/Kbuild
 include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c..9754bac65df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o
 nvkm-y += nvkm/engine/ce/tu102.o
 nvkm-y += nvkm/engine/ce/ga100.o
 nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
+nvkm-y += nvkm/engine/ce/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 9427a592bd16..1c0c60138706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+		return -ENODEV;
 
 	return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index ce56ede7c2e9..9359c5e7aa3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+		return -ENODEV;
 
 	return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
new file mode 100644
index 000000000000..37c3c619c71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_ce.h>
+
+u32
+gb202_ce_grce_mask(struct nvkm_device *device)
+{
+	u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK);
+
+	return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 806a76a72249..34fd2657134b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *);
 int ga100_ce_init(struct nvkm_engine *);
 int ga100_ce_fini(struct nvkm_engine *, bool);
 int ga100_ce_nonstall(struct nvkm_engine *);
+
+u32 gb202_ce_grce_mask(struct nvkm_device *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435dbbd3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
-	struct nvkm_object object;
-	struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
-	struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
-	nvkm_gsp_rm_free(&obj->rm);
-	return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
-	.dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-	struct r535_ce_obj *obj;
-	NVC0B5_ALLOCATION_PARAMETERS *args;
-
-	if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-		return -ENOMEM;
-
-	nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
-	*pobject = &obj->object;
-
-	args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-				     sizeof(*args), &obj->rm);
-	if (WARN_ON(IS_ERR(args)))
-		return PTR_ERR(args);
-
-	args->version = 1;
-	args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
-	return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
-	kfree(engine->func);
-	return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-	    enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
-	struct nvkm_engine_func *rm;
-	int nclass, ret;
-
-	for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-	if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-		return -ENOMEM;
-
-	rm->dtor = r535_ce_dtor;
-	for (int i = 0; i < nclass; i++) {
-		rm->sclass[i].minver = hw->sclass[i].minver;
-		rm->sclass[i].maxver = hw->sclass[i].maxver;
-		rm->sclass[i].oclass = hw->sclass[i].oclass;
-		rm->sclass[i].ctor = r535_ce_obj_ctor;
-	}
-
-	ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
-	if (ret)
-		kfree(rm);
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 7c8647dcb349..67d0545cf902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     struct nvkm_engine **pengine)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+		return -ENODEV;
 
 	return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9093d89b16f3..3375a59ebf1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2529,9 +2529,6 @@ nv170_chipset = {
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x000003ff, ga100_ce_new },
 	.fifo = { 0x00000001, ga100_fifo_new },
-	.nvdec = { 0x0000001f, ga100_nvdec_new },
-	.nvjpg = { 0x00000001, ga100_nvjpg_new },
-	.ofa = { 0x00000001, ga100_ofa_new },
 };
 
 static const struct nvkm_device_chip
@@ -2561,8 +2558,6 @@ nv172_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 	.gr = { 0x00000001, ga102_gr_new },
 	.nvdec = { 0x00000003, ga102_nvdec_new },
-	.nvenc = { 0x00000001, ga102_nvenc_new },
-	.ofa = { 0x00000001, ga102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2593,8 +2588,6 @@ nv173_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 	.gr = { 0x00000001, ga102_gr_new },
 	.nvdec = { 0x00000003, ga102_nvdec_new },
-	.nvenc = { 0x00000001, ga102_nvenc_new },
-	.ofa = { 0x00000001, ga102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2625,8 +2618,6 @@ nv174_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 	.gr = { 0x00000001, ga102_gr_new },
 	.nvdec = { 0x00000003, ga102_nvdec_new },
-	.nvenc = { 0x00000001, ga102_nvenc_new },
-	.ofa = { 0x00000001, ga102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2657,8 +2648,6 @@ nv176_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 	.gr = { 0x00000001, ga102_gr_new },
 	.nvdec = { 0x00000003, ga102_nvdec_new },
-	.nvenc = { 0x00000001, ga102_nvenc_new },
-	.ofa = { 0x00000001, ga102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2689,12 +2678,26 @@ nv177_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 	.gr = { 0x00000001, ga102_gr_new },
 	.nvdec = { 0x00000003, ga102_nvdec_new },
-	.nvenc = { 0x00000001, ga102_nvenc_new },
-	.ofa = { 0x00000001, ga102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
 static const struct nvkm_device_chip
+nv180_chipset = {
+	.name = "GH100",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fault = { 0x00000001, tu102_fault_new },
+	.fb = { 0x00000001, gh100_fb_new },
+	.fsp = { 0x00000001, gh100_fsp_new },
+	.gsp = { 0x00000001, gh100_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
 nv192_chipset = {
 	.name = "AD102",
 	.bar = { 0x00000001, tu102_bar_new },
@@ -2709,14 +2712,9 @@ nv192_chipset = {
 	.timer = { 0x00000001, gk20a_timer_new },
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x0000001f, ga102_ce_new },
-	.disp = { 0x00000001, ad102_disp_new },
+	.disp = { 0x00000001, ga102_disp_new },
 	.dma = { 0x00000001, gv100_dma_new },
 	.fifo = { 0x00000001, ga102_fifo_new },
-	.gr = { 0x00000001, ad102_gr_new },
-	.nvdec = { 0x0000000f, ad102_nvdec_new },
-	.nvenc = { 0x00000007, ad102_nvenc_new },
-	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
-	.ofa = { 0x00000001, ad102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2735,14 +2733,9 @@ nv193_chipset = {
 	.timer = { 0x00000001, gk20a_timer_new },
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x0000001f, ga102_ce_new },
-	.disp = { 0x00000001, ad102_disp_new },
+	.disp = { 0x00000001, ga102_disp_new },
 	.dma = { 0x00000001, gv100_dma_new },
 	.fifo = { 0x00000001, ga102_fifo_new },
-	.gr = { 0x00000001, ad102_gr_new },
-	.nvdec = { 0x0000000f, ad102_nvdec_new },
-	.nvenc = { 0x00000007, ad102_nvenc_new },
-	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
-	.ofa = { 0x00000001, ad102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2761,14 +2754,9 @@ nv194_chipset = {
 	.timer = { 0x00000001, gk20a_timer_new },
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x0000001f, ga102_ce_new },
-	.disp = { 0x00000001, ad102_disp_new },
+	.disp = { 0x00000001, ga102_disp_new },
 	.dma = { 0x00000001, gv100_dma_new },
 	.fifo = { 0x00000001, ga102_fifo_new },
-	.gr = { 0x00000001, ad102_gr_new },
-	.nvdec = { 0x0000000f, ad102_nvdec_new },
-	.nvenc = { 0x00000007, ad102_nvenc_new },
-	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
-	.ofa = { 0x00000001, ad102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2787,14 +2775,9 @@ nv196_chipset = {
 	.timer = { 0x00000001, gk20a_timer_new },
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x0000001f, ga102_ce_new },
-	.disp = { 0x00000001, ad102_disp_new },
+	.disp = { 0x00000001, ga102_disp_new },
 	.dma = { 0x00000001, gv100_dma_new },
 	.fifo = { 0x00000001, ga102_fifo_new },
-	.gr = { 0x00000001, ad102_gr_new },
-	.nvdec = { 0x0000000f, ad102_nvdec_new },
-	.nvenc = { 0x00000007, ad102_nvenc_new },
-	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
-	.ofa = { 0x00000001, ad102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2813,17 +2796,122 @@ nv197_chipset = {
 	.timer = { 0x00000001, gk20a_timer_new },
 	.vfn = { 0x00000001, ga100_vfn_new },
 	.ce = { 0x0000001f, ga102_ce_new },
-	.disp = { 0x00000001, ad102_disp_new },
+	.disp = { 0x00000001, ga102_disp_new },
 	.dma = { 0x00000001, gv100_dma_new },
 	.fifo = { 0x00000001, ga102_fifo_new },
-	.gr = { 0x00000001, ad102_gr_new },
-	.nvdec = { 0x0000000f, ad102_nvdec_new },
-	.nvenc = { 0x00000007, ad102_nvenc_new },
-	.nvjpg = { 0x0000000f, ad102_nvjpg_new },
-	.ofa = { 0x00000001, ad102_ofa_new },
 	.sec2 = { 0x00000001, ga102_sec2_new },
 };
 
+static const struct nvkm_device_chip
+nv1a0_chipset = {
+	.name = "GB100",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb100_fb_new },
+	.fsp = { 0x00000001, gb100_fsp_new },
+	.gsp = { 0x00000001, gb100_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1a2_chipset = {
+	.name = "GB102",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb100_fb_new },
+	.fsp = { 0x00000001, gb100_fsp_new },
+	.gsp = { 0x00000001, gb100_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b2_chipset = {
+	.name = "GB202",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb202_fb_new },
+	.fsp = { 0x00000001, gb202_fsp_new },
+	.gsp = { 0x00000001, gb202_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b3_chipset = {
+	.name = "GB203",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb202_fb_new },
+	.fsp = { 0x00000001, gb202_fsp_new },
+	.gsp = { 0x00000001, gb202_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b5_chipset = {
+	.name = "GB205",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb202_fb_new },
+	.fsp = { 0x00000001, gb202_fsp_new },
+	.gsp = { 0x00000001, gb202_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b6_chipset = {
+	.name = "GB206",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb202_fb_new },
+	.fsp = { 0x00000001, gb202_fsp_new },
+	.gsp = { 0x00000001, gb202_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b7_chipset = {
+	.name = "GB207",
+	.bar = { 0x00000001, tu102_bar_new },
+	.fb = { 0x00000001, gb202_fb_new },
+	.fsp = { 0x00000001, gb202_fsp_new },
+	.gsp = { 0x00000001, gb202_gsp_new },
+	.imem = { 0x00000001, gh100_instmem_new },
+	.mmu = { 0x00000001, gh100_mmu_new },
+	.pci = { 0x00000001, gh100_pci_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.vfn = { 0x00000001, ga100_vfn_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
 struct nvkm_subdev *
 nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
 {
@@ -3065,8 +3153,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	device->debug = nvkm_dbgopt(device->dbgopt, "device");
 	INIT_LIST_HEAD(&device->subdev);
 
-	mmio_base = device->func->resource_addr(device, 0);
-	mmio_size = device->func->resource_size(device, 0);
+	mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);
 
 	device->pri = ioremap(mmio_base, mmio_size);
 	if (device->pri == NULL) {
@@ -3139,7 +3227,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	case 0x140: device->card_type = GV100; break;
 	case 0x160: device->card_type = TU100; break;
 	case 0x170: device->card_type = GA100; break;
+	case 0x180: device->card_type = GH100; break;
 	case 0x190: device->card_type = AD100; break;
+	case 0x1a0: device->card_type = GB10x; break;
+	case 0x1b0: device->card_type = GB20x; break;
 	default:
 		break;
 	}
@@ -3242,11 +3333,19 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	case 0x174: device->chip = &nv174_chipset; break;
 	case 0x176: device->chip = &nv176_chipset; break;
 	case 0x177: device->chip = &nv177_chipset; break;
+	case 0x180: device->chip = &nv180_chipset; break;
 	case 0x192: device->chip = &nv192_chipset; break;
 	case 0x193: device->chip = &nv193_chipset; break;
 	case 0x194: device->chip = &nv194_chipset; break;
 	case 0x196: device->chip = &nv196_chipset; break;
 	case 0x197: device->chip = &nv197_chipset; break;
+	case 0x1a0: device->chip = &nv1a0_chipset; break;
+	case 0x1a2: device->chip = &nv1a2_chipset; break;
+	case 0x1b2: device->chip = &nv1b2_chipset; break;
+	case 0x1b3: device->chip = &nv1b3_chipset; break;
+	case 0x1b5: device->chip = &nv1b5_chipset; break;
+	case 0x1b6: device->chip = &nv1b6_chipset; break;
+	case 0x1b7: device->chip = &nv1b7_chipset; break;
 	default:
 		if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
 			switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 3ff6436007fa..8f0261a0d618 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
 	return container_of(device, struct nvkm_device_pci, device);
 }
 
+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
+{
+	int idx = 0;
+
+	if (bar == NVKM_BAR0_PRI)
+		return idx;
+
+	idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+	if (bar == NVKM_BAR1_FB)
+		return idx;
+
+	idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+	if (bar == NVKM_BAR2_INST)
+		return idx;
+
+	WARN_ON(1);
+	return -1;
+}
+
 static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
-	return pci_resource_start(pdev->pdev, bar);
+	int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+	return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
 }
 
 static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
-	return pci_resource_len(pdev->pdev, bar);
+	int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+	return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
 }
 
 static int
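resource_addr()/resource_size() now take an enum nvkm_bar_id instead of a raw PCI BAR number because a 64-bit memory BAR occupies two consecutive PCI resource slots: nvkm_device_pci_resource_idx() walks the IORESOURCE_MEM_64 flags to find where BAR1/BAR2 actually landed (indices 2 and 4 on a device whose BARs are all 64-bit). The enum definition itself is outside this diffstat; a sketch of the shape the call sites imply, with the names taken from the code above and the zero value deduced from the `!bar` checks later in this series:

    /* Inferred, not shown in this section: zero must mean "no BAR", since
     * gv100's userd drops ".bar = -1" and "bar < 0" checks become "!bar". */
    enum nvkm_bar_id {
    	NVKM_BAR_INVALID = 0,
    	NVKM_BAR0_PRI,	/* priv register window */
    	NVKM_BAR1_FB,	/* framebuffer aperture */
    	NVKM_BAR2_INST,	/* instance-memory aperture */
    };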
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e42b18820a95..75ee7506d443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -11,6 +11,7 @@
 #include <subdev/devinit.h>
 #include <subdev/fault.h>
 #include <subdev/fb.h>
+#include <subdev/fsp.h>
 #include <subdev/fuse.h>
 #include <subdev/gpio.h>
 #include <subdev/gsp.h>
@@ -43,8 +44,6 @@
 #include <engine/msvld.h>
 #include <engine/nvenc.h>
 #include <engine/nvdec.h>
-#include <engine/nvjpg.h>
-#include <engine/ofa.h>
 #include <engine/sec.h>
 #include <engine/sec2.h>
 #include <engine/sw.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78a83f904bbd..114e50ca1827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
 }
 
 static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
-	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+	int idx;
+
+	switch (bar) {
+	case NVKM_BAR0_PRI: idx = 0; break;
+	case NVKM_BAR1_FB : idx = 1; break;
+	default:
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
 }
 
 static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct resource *res = nvkm_device_tegra_resource(device, bar);
 	return res ? res->start : 0;
 }
 
 static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct resource *res = nvkm_device_tegra_resource(device, bar);
 	return res ? resource_size(res) : 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index d7f75b3a43c8..58191b7a0494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -148,6 +148,9 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 	case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
 	case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
 	case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
+	case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break;
+	case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
+	case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
 	default:
 		args->v0.family = 0;
 		break;
@@ -209,8 +212,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_udevice *udev = nvkm_udevice(object);
 	struct nvkm_device *device = udev->device;
 
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = device->func->resource_addr(device, 0);
-	*size = device->func->resource_size(device, 0);
+	*addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	*size = device->func->resource_size(device, NVKM_BAR0_PRI);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8..e1aecd3fe96c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,9 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/ga102.o
-nvkm-y += nvkm/engine/disp/ad102.o
-
-nvkm-y += nvkm/engine/disp/r535.o
 
 nvkm-y += nvkm/engine/disp/udisp.o
 nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
deleted file mode 100644
index 7f300a79aa29..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_disp_func
-ad102_disp = {
-	.uevent = &gv100_disp_chan_uevent,
-	.ramht_size = 0x2000,
-	.root = { 0, 0,AD102_DISP },
-	.user = {
-		{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
-		{{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
-		{{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
-		{{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
-		{{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
-		{}
-	},
-};
-
-int
-ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
-	       struct nvkm_disp **pdisp)
-{
-	if (nvkm_gsp_rm(device->gsp))
-		return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
-
-	return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index 4e43ee383c34..9b84e357d354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 {
 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
 	struct nvkm_device *device = chan->disp->engine.subdev.device;
-	const u64 base = device->func->resource_addr(device, 0);
+	const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);
 
 	*type = NVKM_OBJECT_MAP_IO;
 	*addr = base + chan->func->user(chan, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index cfa3698d3a2f..614921166fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct gv100_disp_caps *caps = gv100_disp_caps(object);
 	struct nvkm_device *device = caps->disp->engine.subdev.device;
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = 0x640000 + device->func->resource_addr(device, 0);
+	*addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
 	*size = 0x1000;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index aff92848abfe..376e9c3bcb1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -25,8 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o
 nvkm-y += nvkm/engine/fifo/tu102.o
 nvkm-y += nvkm/engine/fifo/ga100.o
 nvkm-y += nvkm/engine/fifo/ga102.o
-
-nvkm-y += nvkm/engine/fifo/r535.o
+nvkm-y += nvkm/engine/fifo/gb202.o
 
 nvkm-y += nvkm/engine/fifo/ucgrp.o
 nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 22443fe4a39f..fdffa0391b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
 	}
 
 	/* Allocate USERD + BAR1 polling area. */
-	if (fifo->func->chan.func->userd->bar == 1) {
+	if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
 		struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
 
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
@@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 	nvkm_chid_unref(&fifo->cgid);
 	nvkm_chid_unref(&fifo->chid);
 
-	mutex_destroy(&fifo->userd.mutex);
-
 	nvkm_event_fini(&fifo->nonstall.event);
 	mutex_destroy(&fifo->mutex);
 
@@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 	spin_lock_init(&fifo->lock);
 	mutex_init(&fifo->mutex);
 
-	INIT_LIST_HEAD(&fifo->userd.list);
-	mutex_init(&fifo->userd.mutex);
-
 	return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 7d4716dcd512..4e09985424b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan)
 	nvkm_gpuobj_del(&chan->ramfc);
 
 	if (chan->cgrp) {
-		if (!chan->func->id_put)
-			nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
-		else
-			chan->func->id_put(chan);
-
+		nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
 		nvkm_cgrp_unref(&chan->cgrp);
 	}
 
@@ -359,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
 	/* Validate arguments against class requirements. */
 	if ((runq && runq >= runl->func->runqs) ||
 	    (!func->inst->vmm != !vmm) ||
-	    ((func->userd->bar < 0) == !userd) ||
+	    (!func->userd->bar == !userd) ||
 	    (!func->ramfc->ctxdma != !dmaobj) ||
 	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
 	    (!func->ramfc->priv && priv)) {
 		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
 				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
 			   runl->func->runqs, runq, func->inst->vmm, vmm,
-			   func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+			   func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
 			   func->ramfc->devm, devm, func->ramfc->priv, priv);
 		return -EINVAL;
 	}
@@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
 	}
 
 	/* Allocate channel ID. */
-	if (!chan->func->id_get) {
-		chan->id = nvkm_chid_get(runl->chid, chan);
-		if (chan->id >= 0) {
-			if (func->userd->bar < 0) {
-				if (ouserd + chan->func->userd->size >=
-				    nvkm_memory_size(userd)) {
-					RUNL_DEBUG(runl, "ouserd %llx", ouserd);
-					return -EINVAL;
-				}
-
-				ret = nvkm_memory_kmap(userd, &chan->userd.mem);
-				if (ret) {
-					RUNL_DEBUG(runl, "userd %d", ret);
-					return ret;
-				}
-
-				chan->userd.base = ouserd;
-			} else {
-				chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
-				chan->userd.base = chan->id * chan->func->userd->size;
+	chan->id = nvkm_chid_get(runl->chid, chan);
+	if (chan->id >= 0) {
+		if (!func->userd->bar) {
+			if (ouserd + chan->func->userd->size >=
+			    nvkm_memory_size(userd)) {
+				RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+				return -EINVAL;
+			}
+
+			ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+			if (ret) {
+				RUNL_DEBUG(runl, "userd %d", ret);
+				return ret;
 			}
+
+			chan->userd.base = ouserd;
+		} else {
+			chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+			chan->userd.base = chan->id * chan->func->userd->size;
 		}
-	} else {
-		chan->id = chan->func->id_get(chan, userd, ouserd);
 	}
 
 	if (chan->id < 0) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 013682a709d5..445db5dfd1e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,9 +17,6 @@ struct nvkm_cctx {
 };
 
 struct nvkm_chan_func {
-	int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
-	void (*id_put)(struct nvkm_chan *);
-
 	const struct nvkm_chan_func_inst {
 		u32 size;
 		bool zero;
@@ -27,7 +24,7 @@ struct nvkm_chan_func {
 	} *inst;
 
 	const struct nvkm_chan_func_userd {
-		int bar;
+		enum nvkm_bar_id bar;
 		u32 base;
 		u32 size;
 		void (*clear)(struct nvkm_chan *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
new file mode 100644
index 000000000000..b469e8afeb0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+u32
+gb202_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+	return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id;
+}
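gb202_chan_doorbell_handle() above differs from the Turing layout only in bit 30: tu102 packs (runlist << 16) | chid, while GB202 additionally sets BIT(30). For example (values illustrative), runlist 3 and channel 0x2f give 0x0003002f on tu102 and 0x4003002f on GB202. A decode sketch; field widths beyond what the two functions show are assumptions:

    /* Illustrative decode of the doorbell handles built above. */
    static inline u32 doorbell_runl(u32 handle) { return (handle >> 16) & 0x3fff; }
    static inline u32 doorbell_chid(u32 handle) { return handle & 0xffff; }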
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 6c94451d0faa..e4a4fad2eafc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
 
 static const struct nvkm_chan_func_userd
 gf100_chan_userd = {
-	.bar = 1,
+	.bar = NVKM_BAR1_FB,
 	.size = 0x1000,
 	.clear = gf100_chan_userd_clear,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index d8a4d773a58c..5655eda52a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 gk104_chan_userd = {
-	.bar = 1,
+	.bar = NVKM_BAR1_FB,
 	.size = 0x200,
 	.clear = gf100_chan_userd_clear,
 };
@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
 
-	if (fifo->func->chan.func->userd->bar == 1)
+	if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
 		nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 33066c8cdc64..d7f046c03cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 gv100_chan_userd = {
-	.bar = -1,
 	.size = 0x200,
 	.clear = gf100_chan_userd_clear,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 674faf002b20..c4b8e567d86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 nv04_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0x800000,
 	.size = 0x010000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index e50a94b6d7f8..084ca5561ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
 
 static const struct nvkm_chan_func_userd
 nv40_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0xc00000,
 	.size = 0x001000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 954b5f3a7d57..7bf77661157d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 nv50_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0xc00000,
 	.size = 0x002000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index a0f3277605a5..5e81ae195329 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,7 @@
 #include <core/enum.h>
 struct nvkm_cctx;
 struct nvkm_cgrp;
+struct nvkm_chan;
 struct nvkm_engn;
 struct nvkm_memory;
 struct nvkm_runl;
@@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
 
 void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
 extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
 
 int ga100_fifo_runl_ctor(struct nvkm_fifo *);
 int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
@@ -206,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce;
 extern const struct nvkm_cgrp_func ga100_cgrp;
 extern const struct nvkm_chan_func ga100_chan;
 
+u32 gb202_chan_doorbell_handle(struct nvkm_chan *);
+
 int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *, void *argv,
 		   u32 argc, struct nvkm_object **);
 int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 1d39a6840a40..c5a03298e88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -31,7 +31,7 @@
 
 #include <nvif/class.h>
 
-static u32
+u32
 tu102_chan_doorbell_handle(struct nvkm_chan *chan)
 {
 	return (chan->cgrp->runl->id << 16) | chan->id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 9e56bcc166ed..52420a1edca5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
 
-	if (chan->func->userd->bar < 0)
+	if (!chan->func->userd->bar)
 		return -ENOSYS;
 
 	*type = NVKM_OBJECT_MAP_IO;
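Taken together, the userd changes above replace the old signed convention (-1 = USERD not exposed through a BAR, 0 = BAR0, 1 = BAR1) with enum nvkm_bar_id, where the zero default carries the "no BAR" meaning: gv100 simply omits .bar, and both nvkm_chan_new_() and nvkm_uchan_map() test !bar where they used to test bar < 0. Assuming the inferred enum sketched earlier (NVKM_BAR_INVALID == 0), the mapping is:

    /* Old int value  ->  new enum nvkm_bar_id initializer (assumed)
     *   .bar = -1    ->  field omitted (zero-initialized, "no BAR")
     *   .bar =  0    ->  .bar = NVKM_BAR0_PRI
     *   .bar =  1    ->  .bar = NVKM_BAR1_FB
     */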
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1555f8c40b4f..b5418f05ccd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,9 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o
 nvkm-y += nvkm/engine/gr/gv100.o
 nvkm-y += nvkm/engine/gr/tu102.o
 nvkm-y += nvkm/engine/gr/ga102.o
-nvkm-y += nvkm/engine/gr/ad102.o
-
-nvkm-y += nvkm/engine/gr/r535.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index d285c597aff9..2b51f1d0c281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -352,7 +352,7 @@ int
 ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+		return -ENODEV;
 
 	return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b0e0c9305034..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
 
 int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_gr **);
-int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
-		struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 02a8c62a0a32..13407fafe947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
 	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
 
 	/* begin RAM config */
-	vramsz = device->func->resource_size(device, 1) - 1;
+	vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
 	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
 	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
 	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index a5e1f02791b4..b609b0150ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
 	}
 
 	/* begin RAM config */
-	vramsz = device->func->resource_size(device, 1) - 1;
+	vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
 	switch (device->chipset) {
 	case 0x40:
 		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644
index f4bed3eb1ec2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R515_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
-	struct nvkm_gr base;
-
-	struct {
-		u16 bufferId;
-		u32 size;
-		u8 page;
-		u8 align;
-		bool global;
-		bool init;
-		bool ro;
-	} ctxbuf[R515_GR_MAX_CTXBUFS];
-	int ctxbuf_nr;
-
-	struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
-	struct nvkm_object object;
-	struct r535_gr *gr;
-
-	struct nvkm_vmm *vmm;
-	struct nvkm_chan *chan;
-
-	struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
-	struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
-	struct nvkm_object object;
-	struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
-	struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
-	nvkm_gsp_rm_free(&obj->rm);
-	return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
-	.dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-		 struct nvkm_object **pobject)
-{
-	struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
-	struct r535_gr_obj *obj;
-
-	if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-		return -ENOMEM;
-
-	nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
-	*pobject = &obj->object;
-
-	return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
-				 &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
-	struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
-	struct r535_gr *gr = grc->gr;
-
-	for (int i = 0; i < gr->ctxbuf_nr; i++) {
-		nvkm_vmm_put(grc->vmm, &grc->vma[i]);
-		nvkm_memory_unref(&grc->mem[i]);
-	}
-
-	nvkm_vmm_unref(&grc->vmm);
-	return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
-	.dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
-		    struct nvkm_memory **pmem, struct nvkm_vma **pvma,
-		    struct nvkm_gsp_object *chan)
-{
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
-	ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
-				    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
-	if (WARN_ON(IS_ERR(ctrl)))
-		return PTR_ERR(ctrl);
-
-	ctrl->engineType = 1;
-	ctrl->hChanClient = vmm->rm.client.object.handle;
-	ctrl->hObject = chan->handle;
-
-	for (int i = 0; i < gr->ctxbuf_nr; i++) {
-		NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
-			&ctrl->promoteEntry[ctrl->entryCount];
-		const bool alloc = golden || !gr->ctxbuf[i].global;
-		int ret;
-
-		entry->bufferId = gr->ctxbuf[i].bufferId;
-		entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
-		if (alloc) {
-			ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
-					      NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
-					      gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
-					      gr->ctxbuf[i].init, &pmem[i]);
-			if (WARN_ON(ret))
-				return ret;
-
-			if (gr->ctxbuf[i].bufferId ==
-					NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
-				entry->bNonmapped = 1;
-		} else {
-			if (gr->ctxbuf[i].bufferId ==
-					NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
-				continue;
-
-			pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
-		}
-
-		if (!entry->bNonmapped) {
-			struct gf100_vmm_map_v0 args = {
-				.priv = 1,
-				.ro = gr->ctxbuf[i].ro,
-			};
-
-			mutex_lock(&vmm->mutex.vmm);
-			ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
-						  nvkm_memory_size(pmem[i]), &pvma[i]);
-			mutex_unlock(&vmm->mutex.vmm);
-			if (ret)
-				return ret;
-
-			ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
-			if (ret)
-				return ret;
-
-			entry->gpuVirtAddr = pvma[i]->addr;
-		}
-
-		if (entry->bInitialize) {
-			entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
-			entry->size = gr->ctxbuf[i].size;
-			entry->physAttr = 4;
-		}
-
-		nvkm_debug(subdev,
-			   "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
-			   entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
-			   entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
-		ctrl->entryCount++;
-	}
-
-	return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
-		 struct nvkm_object **pobject)
-{
-	struct r535_gr *gr = r535_gr(base);
-	struct r535_gr_chan *grc;
-	int ret;
-
-	if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
-		return -ENOMEM;
-
-	nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
-	grc->gr = gr;
-	grc->vmm = nvkm_vmm_ref(chan->vmm);
-	grc->chan = chan;
-	*pobject = &grc->object;
-
-	ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
-	struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
-	return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
-	NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
-	struct r535_gr *gr = container_of(base, typeof(*gr), base);
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	struct nvkm_gsp *gsp = device->gsp;
-	struct nvkm_mmu *mmu = device->mmu;
-	struct {
-		struct nvkm_memory *inst;
-		struct nvkm_vmm *vmm;
-		struct nvkm_gsp_object chan;
-		struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-	} golden = {};
-	int ret;
-
-	/* Allocate a channel to use for golden context init. */
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
-	if (ret)
-		goto done;
-
-	ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
-	if (ret)
-		goto done;
-
-	ret = mmu->func->promote_vmm(golden.vmm);
-	if (ret)
-		goto done;
-
-	{
-		NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
-		args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
-					     device->fifo->func->chan.user.oclass,
-					     sizeof(*args), &golden.chan);
-		if (IS_ERR(args)) {
-			ret = PTR_ERR(args);
-			goto done;
-		}
-
-		args->gpFifoOffset = 0;
-		args->gpFifoEntries = 0x1000 / 8;
-		args->flags =
-			NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
-			NVDEF(NVOS04, FLAGS, VPR, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
-			NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
-			NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
-			NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
-			NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
-			NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
-			NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
-			NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
-			NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
-			NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
-			NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
-		args->hVASpace = golden.vmm->rm.object.handle;
-		args->engineType = 1;
-		args->instanceMem.base = nvkm_memory_addr(golden.inst);
-		args->instanceMem.size = 0x1000;
-		args->instanceMem.addressSpace = 2;
-		args->instanceMem.cacheAttrib = 1;
-		args->ramfcMem.base = nvkm_memory_addr(golden.inst);
-		args->ramfcMem.size = 0x200;
-		args->ramfcMem.addressSpace = 2;
-		args->ramfcMem.cacheAttrib = 1;
-		args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
-		args->userdMem.size = 0x200;
-		args->userdMem.addressSpace = 2;
-		args->userdMem.cacheAttrib = 1;
-		args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
-		args->mthdbufMem.size = 0x5000;
-		args->mthdbufMem.addressSpace = 2;
-		args->mthdbufMem.cacheAttrib = 1;
-		args->internalFlags =
-			NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
-			NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
-			NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
-		ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
-		if (ret)
-			goto done;
-	}
-
-	/* Fetch context buffer info from RM and allocate each of them here to use
-	 * during golden context init (or later as a global context buffer).
-	 *
-	 * Also build the information that'll be used to create channel contexts.
-	 */
-	info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-				   NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
-				   sizeof(*info));
-	if (WARN_ON(IS_ERR(info))) {
-		ret = PTR_ERR(info);
-		goto done;
-	}
-
-	for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
-		static const struct {
-			u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
-			u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
-			bool global;
-			bool init;
-			bool ro;
-		} map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
-		       .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
-		       .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
-			/* global init ro */
-			_A( GRAPHICS, MAIN, false, true, false),
-			_B( PATCH, false, true, false),
-			_A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
-			_B( PAGEPOOL, true, false, false),
-			_B( ATTRIBUTE_CB, true, false, false),
-			_B( RTV_CB_GLOBAL, true, false, false),
-			_B( FECS_EVENT, true, true, false),
-			_B( PRIV_ACCESS_MAP, true, true, true),
-#undef _B
-#undef _A
-		};
-		u32 size = info->engineContextBuffersInfo[0].engine[i].size;
-		u8 align, page;
-		int id;
-
-		for (id = 0; id < ARRAY_SIZE(map); id++) {
-			if (map[id].id0 == i)
-				break;
-		}
-
-		nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
-			   size, (id < ARRAY_SIZE(map)) ? "*" : "");
-		if (id >= ARRAY_SIZE(map))
-			continue;
-
-		if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
-			size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
-		if (size >= 1 << 21) page = 21;
-		else if (size >= 1 << 16) page = 16;
-		else page = 12;
-
-		if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
-			align = order_base_2(size);
-		else
-			align = page;
-
-		if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
-			continue;
-
-		gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
-		gr->ctxbuf[gr->ctxbuf_nr].size = size;
-		gr->ctxbuf[gr->ctxbuf_nr].page = page;
-		gr->ctxbuf[gr->ctxbuf_nr].align = align;
-		gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
-		gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
-		gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
-		gr->ctxbuf_nr++;
-
-		if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
-			if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
-				continue;
-
-			gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
-			gr->ctxbuf[gr->ctxbuf_nr].bufferId =
-				NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
-			gr->ctxbuf_nr++;
-		}
-	}
-
-	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
-	/* Promote golden context to RM. */
-	ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
-	if (ret)
-		goto done;
-
-	/* Allocate 3D class on channel to trigger golden context init in RM. */
-	{
-		int i;
-
-		for (i = 0; gr->base.func->sclass[i].ctor; i++) {
-			if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
-				struct nvkm_gsp_object threed;
-
-				ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
-							gr->base.func->sclass[i].oclass, 0,
-							&threed);
-				if (ret)
-					goto done;
-
-				nvkm_gsp_rm_free(&threed);
-				break;
-			}
-		}
-
-		if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
-			ret = -EINVAL;
-			goto done;
-		}
-	}
-
-done:
-	nvkm_gsp_rm_free(&golden.chan);
-	for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
-		nvkm_vmm_put(golden.vmm, &golden.vma[i]);
-	nvkm_vmm_unref(&golden.vmm);
-	nvkm_memory_unref(&golden.inst);
-	return ret;
-
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
-	struct r535_gr *gr = r535_gr(base);
-
-	while (gr->ctxbuf_nr)
-		nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
-	kfree(gr->base.func);
-	return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
-	    struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
-	struct nvkm_gr_func *rm;
-	struct r535_gr *gr;
-	int nclass;
-
-	for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-	if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-		return -ENOMEM;
-
-	rm->dtor = r535_gr_dtor;
-	rm->oneinit = r535_gr_oneinit;
-	rm->units = r535_gr_units;
-	rm->chan_new = r535_gr_chan_new;
-
-	for (int i = 0; i < nclass; i++) {
-		rm->sclass[i].minver = hw->sclass[i].minver;
-		rm->sclass[i].maxver = hw->sclass[i].maxver;
-		rm->sclass[i].oclass = hw->sclass[i].oclass;
-		rm->sclass[i].ctor = r535_gr_obj_ctor;
-	}
-
-	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
-		kfree(rm);
-		return -ENOMEM;
-	}
-
-	*pgr = &gr->base;
-
-	return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index b7a458e9040a..bda8054c6b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -219,7 +219,7 @@ int
 tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+		return -ENODEV;
 
 	return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 2b0e923cb755..37b0cdc760c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -2,8 +2,4 @@
 nvkm-y += nvkm/engine/nvdec/base.o
 nvkm-y += nvkm/engine/nvdec/gm107.o
 nvkm-y += nvkm/engine/nvdec/tu102.o
-nvkm-y += nvkm/engine/nvdec/ga100.o
 nvkm-y += nvkm/engine/nvdec/ga102.o
-nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 022a9c824304..eea6368adae2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -23,16 +23,6 @@
 
 #include <subdev/gsp.h>
 
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvdec_gsp = {
-	.sclass = {
-		{ -1, -1, NVC7B0_VIDEO_DECODER },
-		{}
-	}
-};
-
 static const struct nvkm_falcon_func
 ga102_nvdec_flcn = {
 	.disable = gm200_flcn_disable,
@@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst
 	struct nvkm_nvdec **pnvdec)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+		return -ENODEV;
 
 	return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index f506ae83bfd7..f8d43e913093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
 
 int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
 		    enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
-
-int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
-		   enum nvkm_subdev_type, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644
index 75a24f3e6617..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
-	struct nvkm_object object;
-	struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
-	struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
-	nvkm_gsp_rm_free(&obj->rm);
-	return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
-	.dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-		    struct nvkm_object **pobject)
-{
-	struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-	struct r535_nvdec_obj *obj;
-	NV_BSP_ALLOCATION_PARAMETERS *args;
-
-	if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-		return -ENOMEM;
-
-	nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
-	*pobject = &obj->object;
-
-	args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-				     sizeof(*args), &obj->rm);
-	if (WARN_ON(IS_ERR(args)))
-		return PTR_ERR(args);
-
-	args->size = sizeof(*args);
-	args->engineInstance = oclass->engine->subdev.inst;
-
-	return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
-	struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
-	kfree(nvdec->engine.func);
-	return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-	       enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
-	struct nvkm_engine_func *rm;
-	int nclass;
-
-	for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-	if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-		return -ENOMEM;
-
-	rm->dtor = r535_nvdec_dtor;
-	for (int i = 0; i < nclass; i++) {
-		rm->sclass[i].minver = hw->sclass[i].minver;
-		rm->sclass[i].maxver = hw->sclass[i].maxver;
-		rm->sclass[i].oclass = hw->sclass[i].oclass;
-		rm->sclass[i].ctor = r535_nvdec_obj_ctor;
-	}
-
-	if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
-		kfree(rm);
-		return -ENOMEM;
-	}
-
-	return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
index 808c8e010b9e..fe95b6e22f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -23,22 +23,12 @@
 
 #include <subdev/gsp.h>
 
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvdec = {
-	.sclass = {
-		{ -1, -1, NVC4B0_VIDEO_DECODER },
-		{}
-	}
-};
-
 int
 tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 		struct nvkm_nvdec **pnvdec)
 {
 	if (nvkm_gsp_rm(device->gsp))
-		return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+		return -ENODEV;
 
 	return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 2c1495b730f3..6dcb20d1d156 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -2,7 +2,3 @@
 nvkm-y += nvkm/engine/nvenc/base.o
 nvkm-y += nvkm/engine/nvenc/gm107.o
 nvkm-y += nvkm/engine/nvenc/tu102.o
-nvkm-y += nvkm/engine/nvenc/ga102.o
-nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c deleted file mode 100644 index 6463ab8e5871..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ga102_nvenc = { - .sclass = { - { -1, -1, NVC7B7_VIDEO_ENCODER }, - {} - } -}; - -int -ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvenc **pnvenc) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h index 7917affc6505..b097e3f2867b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[]; int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_nvenc **pnvenc); - -int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c deleted file mode 100644 index c8a2a9196ce5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <core/object.h> -#include <subdev/gsp.h> -#include <engine/fifo.h> - -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> - -struct r535_nvenc_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvenc_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvenc_obj = { - .dtor = r535_nvenc_obj_dtor, -}; - -static int -r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvenc_obj *obj; - NV_MSENC_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_nvenc_dtor(struct nvkm_engine *engine) -{ - struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); - - kfree(nvenc->engine.func); - return nvenc; -} - -int -r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) -{ - struct nvkm_engine_func *rm; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvenc_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvenc_obj_ctor; - } - - if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c index 933864423bb3..8a436b398749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c @@ -23,22 +23,12 @@ #include <subdev/gsp.h> -#include <nvif/class.h> - -static const struct nvkm_engine_func -tu102_nvenc = { - .sclass = { - { -1, -1, NVC4B7_VIDEO_ENCODER }, - {} - } -}; - int tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) { if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc); + return -ENODEV; return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild deleted file mode 100644 index 1408f664add6..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# 
SPDX-License-Identifier: MIT -nvkm-y += nvkm/engine/nvjpg/ga100.o -nvkm-y += nvkm/engine/nvjpg/ad102.o - -nvkm-y += nvkm/engine/nvjpg/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c deleted file mode 100644 index 62705dc6494c..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ad102_nvjpg = { - .sclass = { - { -1, -1, NVC9D1_VIDEO_NVJPG }, - {} - } -}; - -int -ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c deleted file mode 100644 index f550eb07da5a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ga100_nvjpg = { - .sclass = { - { -1, -1, NVC4D1_VIDEO_NVJPG }, - {} - } -}; - -int -ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h deleted file mode 100644 index 1e80cf70033a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_NVJPG_PRIV_H__ -#define __NVKM_NVJPG_PRIV_H__ -#include <engine/nvjpg.h> - -int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c deleted file mode 100644 index 1babddc4eb80..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include <core/object.h> -#include <subdev/gsp.h> -#include <engine/fifo.h> - -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> - -struct r535_nvjpg_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvjpg_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvjpg_obj = { - .dtor = r535_nvjpg_obj_dtor, -}; - -static int -r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvjpg_obj *obj; - NV_NVJPG_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_nvjpg_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvjpg_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvjpg_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild deleted file mode 100644 index 99f1713d7e51..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/engine/ofa/ga100.o -nvkm-y += nvkm/engine/ofa/ga102.o -nvkm-y += nvkm/engine/ofa/ad102.o - -nvkm-y += nvkm/engine/ofa/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c deleted file mode 100644 index 7ac87ef26aec..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ad102_ofa = { - .sclass = { - { -1, -1, NVC9FA_VIDEO_OFA }, - {} - } -}; - -int -ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ad102_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c deleted file mode 100644 index ef474f61a1b5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ga100_ofa = { - .sclass = { - { -1, -1, NVC6FA_VIDEO_OFA }, - {} - } -}; - -int -ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ga100_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c deleted file mode 100644 index bea255529993..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include <subdev/gsp.h> - -#include <nvif/class.h> - -static const struct nvkm_engine_func -ga102_ofa = { - .sclass = { - { -1, -1, NVC7FA_VIDEO_OFA }, - {} - } -}; - -int -ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ga102_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h deleted file mode 100644 index caf29e6bddb4..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_OFA_PRIV_H__ -#define __NVKM_OFA_PRIV_H__ -#include <engine/ofa.h> - -int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c deleted file mode 100644 index 438dc692eefe..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include <core/object.h> -#include <subdev/gsp.h> -#include <subdev/mmu.h> -#include <engine/fifo.h> - -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> - -struct r535_ofa_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_ofa_obj_dtor(struct nvkm_object *object) -{ - struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_ofa_obj = { - .dtor = r535_ofa_obj_dtor, -}; - -static int -r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_ofa_obj *obj; - NV_OFA_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_ofa_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_ofa_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_ofa_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild index 4c2f6fc4ef58..c19ea4ea9bd3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild @@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild include $(src)/nvkm/subdev/fb/Kbuild include $(src)/nvkm/subdev/fuse/Kbuild include $(src)/nvkm/subdev/gpio/Kbuild +include $(src)/nvkm/subdev/fsp/Kbuild include $(src)/nvkm/subdev/gsp/Kbuild include $(src)/nvkm/subdev/i2c/Kbuild include $(src)/nvkm/subdev/iccsense/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild index 9754c6872543..8faee3317a74 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild @@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o nvkm-y += nvkm/subdev/bar/gm107.o nvkm-y += nvkm/subdev/bar/gm20b.o nvkm-y += nvkm/subdev/bar/tu102.o - -nvkm-y += nvkm/subdev/bar/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index 51070b7dda85..e5e60915029c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base) static int gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, - struct lock_class_key *key, int bar_nr) + struct 
lock_class_key *key, enum nvkm_bar_id bar_id) { struct nvkm_device *device = bar->base.subdev.device; resource_size_t bar_len; @@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, if (ret) return ret; - bar_len = device->func->resource_size(device, bar_nr); + bar_len = device->func->resource_size(device, bar_id); if (!bar_len) return -ENOMEM; - if (bar_nr == 3 && bar->bar2_halve) + if (bar_id == NVKM_BAR2_INST && bar->bar2_halve) bar_len >>= 1; ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key, - (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm); + (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm); if (ret) return ret; @@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, /* * Bootstrap page table lookup. */ - if (bar_nr == 3) { + if (bar_id == NVKM_BAR2_INST) { ret = nvkm_vmm_boot(bar_vm->vmm); if (ret) return ret; @@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ if (bar->base.func->bar2.init) { - ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3); + ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST); if (ret) return ret; @@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base) } /* BAR1 */ - ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1); + ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index 27d8a1be43e4..6a881becb02c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c @@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ start = 0x0100000000ULL; - size = device->func->resource_size(device, 3); + size = device->func->resource_size(device, NVKM_BAR2_INST); if (!size) return -ENOMEM; limit = start + size; @@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR1 */ start = 0x0000000000ULL; - size = device->func->resource_size(device, 1); + size = device->func->resource_size(device, NVKM_BAR1_FB); if (!size) return -ENOMEM; limit = start + size; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h index 6c5bbff12eb4..b918e22df5a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h @@ -47,8 +47,8 @@ static inline struct io_mapping * fbmem_init(struct nvkm_device *dev) { - return io_mapping_create_wc(dev->func->resource_addr(dev, 1), - dev->func->resource_size(dev, 1)); + return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB), + dev->func->resource_size(dev, NVKM_BAR1_FB)); } static inline void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c index c123e5893d76..cd2fbc0472d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c @@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object); struct nvkm_device *device = buffer->fault->subdev.device; *type = NVKM_OBJECT_MAP_IO; - *addr = device->func->resource_addr(device, 3) + buffer->addr; + *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr; *size = nvkm_memory_size(buffer->mem); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild 
b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index d1611ad3bf81..8d8a5382d1b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -35,6 +35,9 @@ nvkm-y += nvkm/subdev/fb/gv100.o nvkm-y += nvkm/subdev/fb/tu102.o nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o +nvkm-y += nvkm/subdev/fb/gh100.o +nvkm-y += nvkm/subdev/fb/gb100.o +nvkm-y += nvkm/subdev/fb/gb202.o nvkm-y += nvkm/subdev/fb/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index 25f82b372bca..2819780050d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -25,7 +25,7 @@ #include <subdev/gsp.h> #include <engine/nvdec.h> -static u64 +u64 ga102_fb_vidmem_size(struct nvkm_fb *fb) { return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c new file mode 100644 index 000000000000..1c78c8853617 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include <nvhw/drf.h> +#include <nvhw/ref/gb100/dev_hshub_base.h> + +static void +gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr); + const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr); + const u32 hshub = DRF_LO(NV_PFB_HSHUB0); + struct nvkm_device *device = fb->subdev.device; + + nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo); +} + +static const struct nvkm_fb_func +gb100_fb = { + .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gb100_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c new file mode 100644 index 000000000000..848505026d02 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
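+ *
+ * Annotation (inferred from the code below, not from the original
+ * patch): unlike GB100, which mirrors the sysmem flush page address
+ * into both HSHUB PCIE/EG register pairs, GB20x programs a single
+ * FBHUB0 HI/LO pair, with the register layout taken from the gb10b
+ * dev_fbhub.h header.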
+ */ +#include "priv.h" + +#include <nvhw/drf.h> +#include <nvhw/ref/gb10b/dev_fbhub.h> + +static void +gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + const u64 addr = fb->sysmem.flush_page_addr; + + nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); + nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); +} + +static const struct nvkm_fb_func +gb202_fb = { + .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gb202_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c new file mode 100644 index 000000000000..2d8c51f882d5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include <nvhw/drf.h> +#include <nvhw/ref/gh100/dev_fb.h> + +static void +gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT; + struct nvkm_device *device = fb->subdev.device; + + nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); + nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); +} + +static const struct nvkm_fb_func +gh100_fb = { + .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gh100_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 35c55dfba23d..ebe996503ab2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *); int gv100_fb_init_page(struct nvkm_fb *); bool tu102_fb_vpr_scrub_required(struct nvkm_fb *); + +u64 ga102_fb_vidmem_size(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild new file mode 100644 index 000000000000..1a9ded3a86f8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +nvkm-y += nvkm/subdev/fsp/base.o +nvkm-y += nvkm/subdev/fsp/gh100.o +nvkm-y += nvkm/subdev/fsp/gb100.o +nvkm-y += nvkm/subdev/fsp/gb202.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c new file mode 100644 index 000000000000..e366a980baa9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
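+ *
+ * Annotation (summarising the code below): the chipset-independent
+ * FSP behaviour is limited to waiting for secure boot completion at
+ * preinit time, PIO access to the falcon's EMEM (gp102_flcn_emem_pio)
+ * for message passing, and forwarding chain-of-trust (CoT) requests to
+ * the chipset's boot_gsp_fmc() implementation, after
+ * nvkm_fsp_verify_gsp_fmc() has sanity-checked the caller's
+ * hash/pkey/sig sizes against the CoT version in use.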
+ */ +#include "priv.h" + +int +nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig) +{ + return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume, + img_addr, hash, pkey, sig); +} + +bool +nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size) +{ + return hash_size == fsp->func->cot.size_hash && + pkey_size == fsp->func->cot.size_pkey && + sig_size == fsp->func->cot.size_sig; +} + +static int +nvkm_fsp_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_fsp *fsp = nvkm_fsp(subdev); + + return fsp->func->wait_secure_boot(fsp); +} + +static void * +nvkm_fsp_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_fsp *fsp = nvkm_fsp(subdev); + + nvkm_falcon_dtor(&fsp->falcon); + return fsp; +} + +static const struct nvkm_falcon_func +nvkm_fsp_flcn = { + .emem_pio = &gp102_flcn_emem_pio, +}; + +static const struct nvkm_subdev_func +nvkm_fsp = { + .dtor = nvkm_fsp_dtor, + .preinit = nvkm_fsp_preinit, +}; + +int +nvkm_fsp_new_(const struct nvkm_fsp_func *func, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fsp **pfsp) +{ + struct nvkm_fsp *fsp; + + fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL); + if (!fsp) + return -ENOMEM; + + fsp->func = func; + nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev); + + return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c new file mode 100644 index 000000000000..e06636bf54b6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +static const struct nvkm_fsp_func +gb100_fsp = { + .wait_secure_boot = gh100_fsp_wait_secure_boot, + .cot = { + .version = 2, + .size_hash = 48, + .size_pkey = 97, + .size_sig = 96, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gb100_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c new file mode 100644 index 000000000000..3438aac6383e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
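+ *
+ * Annotation (inferred from the code below): GB20x keeps the CoT v2
+ * layout of GB100 (48-byte hash, 97-byte public key, 96-byte
+ * signature, gh100_fsp_boot_gsp_fmc()); only the secure boot poll is
+ * duplicated here, so it can use the gb202 dev_therm register
+ * definitions rather than the gh100 ones.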
+ */ +#include "priv.h" + +#include <nvhw/drf.h> +#include <nvhw/ref/gb202/dev_therm.h> + +static int +gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + unsigned timeout_ms = 4000; + + do { + u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS); + + if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS) + return 0; + + usleep_range(1000, 2000); + } while (timeout_ms--); + + return -ETIMEDOUT; +} + +static const struct nvkm_fsp_func +gb202_fsp = { + .wait_secure_boot = gb202_fsp_wait_secure_boot, + .cot = { + .version = 2, + .size_hash = 48, + .size_pkey = 97, + .size_sig = 96, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gb202_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c new file mode 100644 index 000000000000..2815be4bf5de --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include <nvhw/drf.h> +#include <nvhw/ref/gh100/dev_fsp_pri.h> +#include <nvhw/ref/gh100/dev_therm.h> + +#include <nvrm/nvtypes.h> + +#define MCTP_HEADER_VERSION 3:0 +#define MCTP_HEADER_RSVD 7:4 + +#define MCTP_HEADER_DEID 15:8 +#define MCTP_HEADER_SEID 23:16 + +#define MCTP_HEADER_TAG 26:24 +#define MCTP_HEADER_TO 27:27 +#define MCTP_HEADER_SEQ 29:28 +#define MCTP_HEADER_EOM 30:30 +#define MCTP_HEADER_SOM 31:31 + +#define MCTP_MSG_HEADER_TYPE 6:0 +#define MCTP_MSG_HEADER_IC 7:7 + +#define MCTP_MSG_HEADER_VENDOR_ID 23:8 +#define MCTP_MSG_HEADER_NVDM_TYPE 31:24 + +#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e +#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de + +#define NVDM_TYPE_COT 0x14 +#define NVDM_TYPE_FSP_RESPONSE 0x15 + +#pragma pack(1) +typedef struct nvdm_payload_cot +{ + NvU16 version; + NvU16 size; + NvU64 gspFmcSysmemOffset; + NvU64 frtsSysmemOffset; + NvU32 frtsSysmemSize; + + // Note this is an offset from the end of FB + NvU64 frtsVidmemOffset; + NvU32 frtsVidmemSize; + + // Authentication related fields + NvU32 hash384[12]; + NvU32 publicKey[96]; + NvU32 signature[96]; + + NvU64 gspBootArgsSysmemOffset; +} NVDM_PAYLOAD_COT; +#pragma pack() + +#pragma pack(1) +typedef struct +{ + NvU32 taskId; + NvU32 commandNvdmType; + NvU32 errorCode; +} NVDM_PAYLOAD_COMMAND_RESPONSE; +#pragma pack() + +static u32 +gh100_fsp_poll(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + u32 head, tail; + + head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0)); + tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0)); + + if (head == tail) + return 0; + + return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. 
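+ * HEAD points at the first DWORD, so an N-byte message ends at
+ * TAIL = HEAD + N - 4: e.g. HEAD=0x00, TAIL=0x0c decodes to
+ * (0x0c - 0x00) + 4 = 16 bytes.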
*/ +} + +static int +gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size) +{ + struct nvkm_device *device = fsp->subdev.device; + u32 packet_size; + int ret; + + packet_size = gh100_fsp_poll(fsp); + if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size)) + return -EINVAL; + + ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size); + if (ret) + return ret; + + nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0); + nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0); + + return packet_size; +} + +static int +gh100_fsp_wait(struct nvkm_fsp *fsp) +{ + int time = 1000; + + do { + if (gh100_fsp_poll(fsp)) + return 0; + + usleep_range(1000, 2000); + } while(time--); + + return -ETIMEDOUT; +} + +static int +gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size) +{ + struct nvkm_device *device = fsp->subdev.device; + int time = 1000, ret; + + if (WARN_ON(packet_size % sizeof(u32))) + return -EINVAL; + + /* Ensure any previously sent message has been consumed. */ + do { + u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0)); + u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0)); + + if (tail == head) + break; + + usleep_range(1000, 2000); + } while(time--); + + if (time < 0) + return -ETIMEDOUT; + + /* Write message to EMEM. */ + ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false); + if (ret) + return ret; + + /* Update queue pointers - TAIL points at last DWORD written. */ + nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32)); + nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0); + return 0; +} + +static int +gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size) +{ + struct nvkm_subdev *subdev = &fsp->subdev; + struct { + u32 mctp_header; + u32 nvdm_header; + NVDM_PAYLOAD_COMMAND_RESPONSE response; + } reply; + int ret; + + ret = gh100_fsp_send(fsp, packet, packet_size); + if (ret) + return ret; + + ret = gh100_fsp_wait(fsp); + if (ret) + return ret; + + ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply)); + if (ret < 0) + return ret; + + if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) || + NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) { + nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header); + return -EIO; + } + + if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) || + NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) || + NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) { + nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header); + return -EIO; + } + + if (reply.response.commandNvdmType != nvdm_type) { + nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n", + nvdm_type, reply.response.commandNvdmType); + return -EIO; + } + + if (reply.response.errorCode) { + nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n", + nvdm_type, reply.response.errorCode); + return -EIO; + } + + return 0; +} + +int +gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig) +{ + struct { + u32 mctp_header; + u32 nvdm_header; + NVDM_PAYLOAD_COT cot; + } msg = {}; + + msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) | + NVVAL(MCTP, HEADER, EOM, 1) | + NVVAL(MCTP, HEADER, SEID, 0) | + NVVAL(MCTP, HEADER, SEQ, 0); + + msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) | + NVDEF(MCTP, MSG_HEADER, 
VENDOR_ID, NV) | + NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT); + + msg.cot.version = fsp->func->cot.version; + msg.cot.size = sizeof(msg.cot); + msg.cot.gspFmcSysmemOffset = img_addr; + if (!resume) { + msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000); + msg.cot.frtsVidmemSize = 0x100000; + } + + memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash); + memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey); + memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig); + + msg.cot.gspBootArgsSysmemOffset = args_addr; + + return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg)); +} + +int +gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + unsigned timeout_ms = 4000; + + do { + u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS); + + if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS) + return 0; + + usleep_range(1000, 2000); + } while (timeout_ms--); + + return -ETIMEDOUT; +} + +static const struct nvkm_fsp_func +gh100_fsp = { + .wait_secure_boot = gh100_fsp_wait_secure_boot, + .cot = { + .version = 1, + .size_hash = 48, + .size_pkey = 384, + .size_sig = 384, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gh100_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h new file mode 100644 index 000000000000..f0b2c605c33d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
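+ *
+ * Annotation: nvkm_fsp_func captures a chipset's chain-of-trust
+ * contract. CoT v1 (gh100) carries 384-byte key/signature blobs,
+ * while v2 (gb100/gb202) shrinks them to 97/96 bytes; those sizes are
+ * consistent with an uncompressed P-384 point and an r||s ECDSA
+ * signature, though that is an inference from the sizes alone. The
+ * 48-byte (SHA-384-sized) hash is common to both versions.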
+ */ +#ifndef __NVKM_FSP_PRIV_H__ +#define __NVKM_FSP_PRIV_H__ +#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev) +#include <subdev/fsp.h> + +struct nvkm_fsp_func { + int (*wait_secure_boot)(struct nvkm_fsp *); + + struct { + u32 version; + u32 size_hash; + u32 size_pkey; + u32 size_sig; + int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); + } cot; +}; + +int nvkm_fsp_new_(const struct nvkm_fsp_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); + +int gh100_fsp_wait_secure_boot(struct nvkm_fsp *); +int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 16bf2f1bb780..e9c948b67bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o nvkm-y += nvkm/subdev/gsp/tu116.o nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o +nvkm-y += nvkm/subdev/gsp/gh100.o nvkm-y += nvkm/subdev/gsp/ad102.o +nvkm-y += nvkm/subdev/gsp/gb100.o +nvkm-y += nvkm/subdev/gsp/gb202.o -nvkm-y += nvkm/subdev/gsp/r535.o +include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index c849c6299c52..eb765da0876e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -22,30 +22,27 @@ #include "priv.h" static const struct nvkm_gsp_func -ad102_gsp_r535_113_01 = { +ad102_gsp = { .flcn = &ga102_gsp_flcn, .fwsec = &ga102_gsp_fwsec, .sig_section = ".fwsignature_ad10x", - .wpr_heap.os_carveout_size = 20 << 20, - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 84 << 20, - .booter.ctor = ga102_gsp_booter_ctor, .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ad10x_gpu, }; static struct nvkm_gsp_fwif ad102_gsps[] = { - { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, + { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true }, + { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true }, {} }; @@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index da1bebb896f7..d23243a83a4c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_gsp *gsp = nvkm_gsp(subdev); - if (!gsp->func->fini) + if 
(!gsp->func->fini || !gsp->running) return 0; return gsp->func->fini(gsp, suspend); @@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev) return gsp->func->oneinit(gsp); } +void +nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp) +{ + nvkm_firmware_put(gsp->fws.fmc); + gsp->fws.fmc = NULL; + nvkm_firmware_put(gsp->fws.bl); + gsp->fws.bl = NULL; + nvkm_firmware_put(gsp->fws.booter.unload); + gsp->fws.booter.unload = NULL; + nvkm_firmware_put(gsp->fws.booter.load); + gsp->fws.booter.load = NULL; + nvkm_firmware_put(gsp->fws.rm); + gsp->fws.rm = NULL; +} + static void * nvkm_gsp_dtor(struct nvkm_subdev *subdev) { @@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev) gsp->func->dtor(gsp); nvkm_falcon_dtor(&gsp->falcon); + kfree(gsp->rm); return gsp; } @@ -101,6 +117,16 @@ nvkm_gsp = { }; int +nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, + const struct firmware **pfw) +{ + char fwname[64]; + + snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); + return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); +} + +int nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp) { @@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, return PTR_ERR(fwif); gsp->func = fwif->func; - gsp->rm = gsp->func->rm; + + if (fwif->rm) { + nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver); + + gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL); + if (!gsp->rm) + return -ENOMEM; + + gsp->rm->device = device; + gsp->rm->gpu = fwif->func->rm.gpu; + gsp->rm->wpr = fwif->rm->wpr; + gsp->rm->api = fwif->rm->api; + } return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000, &gsp->falcon); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index 223f68b532ef..d201e8697226 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -39,29 +39,27 @@ ga100_gsp_flcn = { }; static const struct nvkm_gsp_func -ga100_gsp_r535_113_01 = { +ga100_gsp = { .flcn = &ga100_gsp_flcn, .fwsec = &tu102_gsp_fwsec, .sig_section = ".fwsignature_ga100", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ga100_gpu, }; static struct nvkm_gsp_fwif ga100_gsps[] = { - { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, + { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" }, + { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index 4c4b4168a266..917f7e2f6c46 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -150,25 +150,21 @@ ga102_gsp_flcn = { }; static const struct nvkm_gsp_func -ga102_gsp_r535_113_01 = { +ga102_gsp_r535 = { .flcn = &ga102_gsp_flcn, .fwsec = &ga102_gsp_fwsec, .sig_section 
= ".fwsignature_ga10x", - .wpr_heap.os_carveout_size = 20 << 20, - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 84 << 20, - .booter.ctor = ga102_gsp_booter_ctor, .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ga1xx_gpu, }; static const struct nvkm_gsp_func @@ -178,7 +174,8 @@ ga102_gsp = { static struct nvkm_gsp_fwif ga102_gsps[] = { - { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, + { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" }, + { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" }, { -1, gv100_gsp_nofw, &ga102_gsp }, {} }; @@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c new file mode 100644 index 000000000000..12a3f2c1ed82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +static const struct nvkm_gsp_func +gb100_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gb10x", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gb10x_gpu, +}; + +static struct nvkm_gsp_fwif +gb100_gsps[] = { + { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true }, + {} +}; + +int +gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gb100, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb102, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c new file mode 100644 index 000000000000..c1d718172ddf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "priv.h" + +static const struct nvkm_gsp_func +gb202_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gb20x", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gb20x_gpu, +}; + +static struct nvkm_gsp_fwif +gb202_gsps[] = { + { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true }, + {} +}; + +int +gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gb202, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb203, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb205, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb206, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb207, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c new file mode 100644 index 000000000000..ce31e8248807 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c @@ -0,0 +1,358 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include <linux/elf.h> +#include <linux/crc32.h> + +#include <subdev/fb.h> +#include <subdev/fsp.h> + +#include <rm/r570/nvrm/gsp.h> + +#include <nvhw/drf.h> +#include <nvhw/ref/gh100/dev_falcon_v4.h> +#include <nvhw/ref/gh100/dev_riscv_pri.h> + +int +gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + struct nvkm_falcon *falcon = &gsp->falcon; + int ret, time = 4000; + + /* Shut down RM. */ + ret = r535_gsp_fini(gsp, suspend); + if (ret && suspend) + return ret; + + /* Wait for RISC-V to halt. */ + do { + u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL); + + if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED)) + return 0; + + usleep_range(1000, 2000); + } while (time--); + + return -ETIMEDOUT; +} + +static bool +gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0) +{ + u32 data; + + /* Wait for GSP access via BAR0 to be allowed. */ + *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0); + + if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100) + return false; + + /* Check if an error code has been reported. */ + if (*mbox0) { + u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1); + + /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */ + if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr) + return true; + } + + /* Check if lockdown has been released. 
*/ + data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2); + return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN); +} + +int +gh100_gsp_init(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + const bool resume = gsp->sr.meta.data != NULL; + struct nvkm_gsp_mem *meta; + GSP_FMC_BOOT_PARAMS *args; + int ret, time = 4000; + u32 rsvd_size; + u32 mbox0; + + if (!resume) { + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args); + if (ret) + return ret; + + meta = &gsp->wpr_meta; + } else { + gsp->rm->api->gsp->set_rmargs(gsp, true); + meta = &gsp->sr.meta; + } + + args = gsp->fmc.args.data; + + args->bootGspRmParams.gspRmDescOffset = meta->addr; + args->bootGspRmParams.gspRmDescSize = meta->size; + args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM; + args->bootGspRmParams.bIsGspRmBoot = 1; + + args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM; + args->gspRmParams.bootArgsOffset = gsp->libos.addr; + + rsvd_size = gsp->fb.heap.size; + if (gsp->rm->wpr->rsvd_size_pmu) + rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000); + + ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume, + gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig); + if (ret) + return ret; + + do { + if (gh100_gsp_lockdown_released(gsp, &mbox0)) + break; + + usleep_range(1000, 2000); + } while (time--); + + if (time < 0) { + nvkm_error(subdev, "GSP-FMC boot timed out\n"); + return -ETIMEDOUT; + } + + if (mbox0) { + nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0); + return -EIO; + } + + return r535_gsp_init(gsp); +} + +static int +gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta); + if (ret) + return ret; + + gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device); + gsp->fb.bios.vga_workspace.size = 128 * 1024; + gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sizeOfRadix3Elf = gsp->fw.len; + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; + + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp); + meta->frtsSize = 0x100000; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu; + return 0; +} + +/* The sh_flags value for the binary blobs in the ELF image */ +#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC) + +#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr)) +#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr)) + +/* The FMC ELF header must be exactly this */ +static const u8 elf_header[] = { + 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */ + 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */ + + ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */ + ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */ + 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, 
e_shentsize */ + + 6, 0, 1, 0, /* e_shnum, e_shstrndx */ +}; + +/** + * elf_validate_sections - validate each section in the FMC ELF image + * @elf: ELF image + * @length: size of the entire ELF image + */ +static bool +elf_validate_sections(const void *elf, size_t length) +{ + const struct elf32_hdr *ehdr = elf; + const struct elf32_shdr *shdr = elf + ehdr->e_shoff; + + /* The offset of the first section */ + Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize; + + if (section_begin > length) + return false; + + /* The first section header is the null section, so skip it */ + for (unsigned int i = 1; i < ehdr->e_shnum; i++) { + if (i == ehdr->e_shstrndx) { + if (shdr[i].sh_type != SHT_STRTAB) + return false; + if (shdr[i].sh_flags != SHF_STRINGS) + return false; + } else { + if (shdr[i].sh_type != SHT_PROGBITS) + return false; + if (shdr[i].sh_flags != FMC_SHF_FLAGS) + return false; + } + + /* Ensure that each section is inside the image */ + if (shdr[i].sh_offset < section_begin || + (u64)shdr[i].sh_offset + shdr[i].sh_size > length) + return false; + + /* Non-zero sh_info is a CRC */ + if (shdr[i].sh_info) { + /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */ + u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0; + + if (shdr[i].sh_info != crc32) + return false; + } + } + + return true; +} + +/** + * elf_section - return a pointer to the data for a given section + * @elf: ELF image + * @name: section name to search for + * @len: pointer to returned length of found section + */ +static const void * +elf_section(const void *elf, const char *name, unsigned int *len) +{ + const struct elf32_hdr *ehdr = elf; + const struct elf32_shdr *shdr = elf + ehdr->e_shoff; + const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset; + + for (unsigned int i = 1; i < ehdr->e_shnum; i++) { + if (!strcmp(&names[shdr[i].sh_name], name)) { + *len = shdr[i].sh_size; + return elf + shdr[i].sh_offset; + } + } + + return NULL; +} + +int +gh100_gsp_oneinit(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_fsp *fsp = device->fsp; + const void *fw = gsp->fws.fmc->data; + const void *hash, *sig, *pkey, *img; + unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0; + int ret; + + if (gsp->fws.fmc->size < ELF_HDR_SIZE || + memcmp(fw, elf_header, sizeof(elf_header)) || + !elf_validate_sections(fw, gsp->fws.fmc->size)) { + nvkm_error(subdev, "fmc firmware image is invalid\n"); + return -ENODATA; + } + + hash = elf_section(fw, "hash", &hash_len); + sig = elf_section(fw, "signature", &sig_len); + pkey = elf_section(fw, "publickey", &pkey_len); + img = elf_section(fw, "image", &img_len); + + if (!hash || !sig || !pkey || !img) { + nvkm_error(subdev, "fmc firmware image is invalid\n"); + return -ENODATA; + } + + if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len)) + return -EINVAL; + + /* Load GSP-FMC FW into memory. 
*/ + ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw); + if (ret) + return ret; + + memcpy(gsp->fmc.fw.data, img, img_len); + + gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL); + gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL); + gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL); + if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig) + return -ENOMEM; + + ret = r535_gsp_oneinit(gsp); + if (ret) + return ret; + + return gh100_gsp_wpr_meta_init(gsp); +} + +static const struct nvkm_gsp_func +gh100_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gh100", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gh100_gpu, +}; + +int +gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + int ret; + + ret = tu102_gsp_load_rm(gsp, fwif); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc); + +done: + if (ret) + nvkm_gsp_dtor_fws(gsp); + + return ret; +} + +static struct nvkm_gsp_fwif +gh100_gsps[] = { + { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true }, + {} +}; + +int +gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gh100, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 9f4a62375a27..4f14e85fc69e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -2,6 +2,7 @@ #ifndef __NVKM_GSP_PRIV_H__ #define __NVKM_GSP_PRIV_H__ #include <subdev/gsp.h> +#include <rm/gpu.h> enum nvkm_acr_lsf_id; int nvkm_gsp_fwsec_frts(struct nvkm_gsp *); @@ -11,12 +12,32 @@ struct nvkm_gsp_fwif { int version; int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); const struct nvkm_gsp_func *func; + const struct nvkm_rm_impl *rm; const char *ver; bool enable; }; +int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver, + const struct firmware **); +void nvkm_gsp_dtor_fws(struct nvkm_gsp *); + int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); -int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + +int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); +int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *); + +int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + +#define NVKM_GSP_FIRMWARE_BOOTER(chip, vers) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin") + +#define NVKM_GSP_FIRMWARE_FMC(chip, vers) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin") struct nvkm_gsp_func { const struct nvkm_falcon_func *flcn; @@ -25,12 +46,6 @@ struct nvkm_gsp_func { char *sig_section; struct { - u32 os_carveout_size; - u32 base_size; - u64 min_size; - } wpr_heap; - - struct { int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *, struct nvkm_falcon *, struct nvkm_falcon_fw *); } booter; @@ -41,7 +56,9 @@ struct nvkm_gsp_func { int (*fini)(struct nvkm_gsp *, bool suspend); int (*reset)(struct 
nvkm_gsp *); - const struct nvkm_gsp_rm *rm; + struct { + const struct nvkm_rm_gpu *gpu; + } rm; }; extern const struct nvkm_falcon_func tu102_gsp_flcn; @@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec; int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, struct nvkm_falcon *, struct nvkm_falcon_fw *); int tu102_gsp_oneinit(struct nvkm_gsp *); +int tu102_gsp_init(struct nvkm_gsp *); +int tu102_gsp_fini(struct nvkm_gsp *, bool suspend); int tu102_gsp_reset(struct nvkm_gsp *); +u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *); extern const struct nvkm_falcon_func ga102_gsp_flcn; extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec; @@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware struct nvkm_falcon *, struct nvkm_falcon_fw *); int ga102_gsp_reset(struct nvkm_gsp *); +int gh100_gsp_oneinit(struct nvkm_gsp *); +int gh100_gsp_init(struct nvkm_gsp *); +int gh100_gsp_fini(struct nvkm_gsp *, bool suspend); + void r535_gsp_dtor(struct nvkm_gsp *); int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); int r535_gsp_fini(struct nvkm_gsp *, bool suspend); -extern const struct nvkm_gsp_rm r535_gsp_rm; int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild new file mode 100644 index 000000000000..04037394a2da --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +nvkm-y += nvkm/subdev/gsp/rm/client.o +nvkm-y += nvkm/subdev/gsp/rm/engine.o +nvkm-y += nvkm/subdev/gsp/rm/gr.o +nvkm-y += nvkm/subdev/gsp/rm/nvdec.o +nvkm-y += nvkm/subdev/gsp/rm/nvenc.o + +nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o +nvkm-y += nvkm/subdev/gsp/rm/ga100.o +nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o +nvkm-y += nvkm/subdev/gsp/rm/ad10x.o +nvkm-y += nvkm/subdev/gsp/rm/gh100.o +nvkm-y += nvkm/subdev/gsp/rm/gb10x.o +nvkm-y += nvkm/subdev/gsp/rm/gb20x.o + +include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild +include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c new file mode 100644 index 000000000000..e1ce6355c35f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +ad10x_gpu = { + .disp.class = { + .root = AD102_DISP, + .caps = GV100_DISP_CAPS, + .core = AD102_DISP_CORE_CHANNEL_DMA, + .wndw = GA102_DISP_WINDOW_CHANNEL_DMA, + .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GA102_DISP_CURSOR, + }, + + .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = AMPERE_DMA_COPY_B, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = ADA_A, + .compute = ADA_COMPUTE_A, + }, + .nvdec.class = NVC9B0_VIDEO_DECODER, + .nvenc.class = NVC9B7_VIDEO_ENCODER, + .ofa.class = NVC9FA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c new file mode 100644 index 000000000000..72d3e3ca84c2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "rm.h" + +void +nvkm_gsp_client_dtor(struct nvkm_gsp_client *client) +{ + const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0); + struct nvkm_gsp *gsp = client->gsp; + + if (!gsp) + return; + + if (client->object.client) + nvkm_gsp_rm_free(&client->object); + + mutex_lock(&gsp->client_id.mutex); + idr_remove(&gsp->client_id.idr, id); + mutex_unlock(&gsp->client_id.mutex); + + client->gsp = NULL; +} + +int +nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +{ + int id, ret; + + if (WARN_ON(!gsp->rm)) + return -ENOSYS; + + mutex_lock(&gsp->client_id.mutex); + id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL); + mutex_unlock(&gsp->client_id.mutex); + if (id < 0) + return id; + + client->gsp = gsp; + client->object.client = client; + INIT_LIST_HEAD(&client->events); + + ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id)); + if (ret) + nvkm_gsp_client_dtor(client); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c new file mode 100644 index 000000000000..3b0e83b2f57f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "engine.h" +#include "gpu.h" + +#include <core/object.h> +#include <engine/fifo/chan.h> + +struct nvkm_rm_engine { + struct nvkm_engine engine; + + struct nvkm_engine_func func; +}; + +struct nvkm_rm_engine_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +nvkm_rm_engine_obj_dtor(struct nvkm_object *object) +{ + struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +nvkm_rm_engine_obj = { + .dtor = nvkm_rm_engine_obj_dtor, +}; + +int +nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass, + struct nvkm_object **pobject) +{ + struct nvkm_rm *rm = chan->client->gsp->rm; + const int inst = oclass->engine->subdev.inst; + const u32 class = oclass->base.oclass; + const u32 handle = oclass->handle; + struct nvkm_rm_engine_obj *obj; + int ret; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + switch (oclass->engine->subdev.type) { + case NVKM_ENGINE_CE: + ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_GR: + ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm); + break; + case NVKM_ENGINE_NVDEC: + ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_NVENC: + ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_NVJPG: + ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_OFA: + ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm); + break; + default: + ret = -EINVAL; + WARN_ON(1); + break; + } + + if (ret) { + kfree(obj); + return ret; + } + + nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object); + *pobject = &obj->object; + return 0; +} + +static int +nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + + return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject); +} + +static void * +nvkm_rm_engine_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm, + enum nvkm_subdev_type type, int inst, + const u32 *class, int nclass, struct nvkm_engine *engine) +{ + struct nvkm_engine_func *func; + + func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL); + if (!func) + return -ENOMEM; + + func->dtor = dtor; + + for (int i = 0; i < nclass; i++) { + func->sclass[i].oclass = class[i]; + func->sclass[i].minver = -1; + func->sclass[i].maxver = 0; + func->sclass[i].ctor = nvkm_rm_engine_obj_ctor; + } + + nvkm_engine_ctor(func, rm->device, type, inst, true, engine); + return 0; +} + +static int +nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class, + struct nvkm_engine **pengine) +{ + struct nvkm_engine *engine; + int ret; + + engine = kzalloc(sizeof(*engine), GFP_KERNEL); + if (!engine) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine); + if (ret) { + kfree(engine); + return ret; + } + + *pengine = engine; + return 0; +} + +int +nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst) +{ + const struct nvkm_rm_gpu *gpu = rm->gpu; + struct nvkm_device *device = rm->device; + + switch (type) { + case NVKM_ENGINE_CE: + if (WARN_ON(inst >= 
ARRAY_SIZE(device->ce))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]); + case NVKM_ENGINE_GR: + if (inst != 0) + return -ENODEV; /* MiG not supported, just ignore. */ + + return nvkm_rm_gr_new(rm); + case NVKM_ENGINE_NVDEC: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec))) + return -EINVAL; + + return nvkm_rm_nvdec_new(rm, inst); + case NVKM_ENGINE_NVENC: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc))) + return -EINVAL; + + return nvkm_rm_nvenc_new(rm, inst); + case NVKM_ENGINE_NVJPG: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]); + case NVKM_ENGINE_OFA: + if (WARN_ON(inst >= ARRAY_SIZE(device->ofa))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]); + default: + break; + } + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h new file mode 100644 index 000000000000..5b8c9c3901d4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_ENGINE_H__ +#define __NVKM_RM_ENGINE_H__ +#include "gpu.h" + +int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *, + enum nvkm_subdev_type type, int inst, + const u32 *class, int nclass, struct nvkm_engine *); +int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst); + +int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *, + struct nvkm_object **); + +int nvkm_rm_gr_new(struct nvkm_rm *); +int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst); +int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c new file mode 100644 index 000000000000..a48c6134075d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +ga100_gpu = { + .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = AMPERE_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = AMPERE_A, + .compute = AMPERE_COMPUTE_A, + }, + .nvdec.class = NVC6B0_VIDEO_DECODER, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c new file mode 100644 index 000000000000..50536ad7f85d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +ga1xx_gpu = { + .disp.class = { + .root = GA102_DISP, + .caps = GV100_DISP_CAPS, + .core = GA102_DISP_CORE_CHANNEL_DMA, + .wndw = GA102_DISP_WINDOW_CHANNEL_DMA, + .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GA102_DISP_CURSOR, + }, + + .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = AMPERE_DMA_COPY_B, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = AMPERE_B, + .compute = AMPERE_COMPUTE_B, + }, + .nvdec.class = NVC7B0_VIDEO_DECODER, + .nvenc.class = NVC7B7_VIDEO_ENCODER, + .ofa.class = NVC7FA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c new file mode 100644 index 000000000000..2f517dcd721a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +gb10x_gpu = { + .usermode.class = HOPPER_USERMODE_A, + + .fifo.chan = { + .class = BLACKWELL_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = BLACKWELL_DMA_COPY_A, + .gr.class = { + .i2m = BLACKWELL_INLINE_TO_MEMORY_A, + .twod = FERMI_TWOD_A, + .threed = BLACKWELL_A, + .compute = BLACKWELL_COMPUTE_A, + }, + .nvdec.class = NVCDB0_VIDEO_DECODER, + .nvjpg.class = NVCDD1_VIDEO_NVJPG, + .ofa.class = NVCDFA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c new file mode 100644 index 000000000000..950471d9996e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +#include <engine/ce/priv.h> +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +gb20x_gpu = { + .disp.class = { + .root = GB202_DISP, + .caps = GB202_DISP_CAPS, + .core = GB202_DISP_CORE_CHANNEL_DMA, + .wndw = GB202_DISP_WINDOW_CHANNEL_DMA, + .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GB202_DISP_CURSOR, + }, + + .usermode.class = BLACKWELL_USERMODE_A, + + .fifo.chan = { + .class = BLACKWELL_CHANNEL_GPFIFO_B, + .doorbell_handle = gb202_chan_doorbell_handle, + }, + + .ce = { + .class = BLACKWELL_DMA_COPY_B, + .grce_mask = gb202_ce_grce_mask, + }, + .gr.class = { + .i2m = BLACKWELL_INLINE_TO_MEMORY_A, + .twod = FERMI_TWOD_A, + .threed = BLACKWELL_B, + .compute = BLACKWELL_COMPUTE_B, + }, + .nvdec.class = NVCFB0_VIDEO_DECODER, + .nvenc.class = NVCFB7_VIDEO_ENCODER, + .nvjpg.class = NVCFD1_VIDEO_NVJPG, + .ofa.class = NVCFFA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c new file mode 100644 index 000000000000..49e2c54e1aa8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +gh100_gpu = { + .usermode.class = HOPPER_USERMODE_A, + + .fifo.chan = { + .class = HOPPER_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = HOPPER_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = HOPPER_A, + .compute = HOPPER_COMPUTE_A, + }, + .nvdec.class = NVB8B0_VIDEO_DECODER, + .nvjpg.class = NVB8D1_VIDEO_NVJPG, + .ofa.class = NVB8FA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h new file mode 100644 index 000000000000..46a6325641b7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_GPU_H__ +#define __NVKM_RM_GPU_H__ +#include "rm.h" + +struct nvkm_rm_gpu { + struct { + struct { + u32 root; + u32 caps; + u32 core; + u32 wndw; + u32 wimm; + u32 curs; + } class; + } disp; + + struct { + u32 class; + } usermode; + + struct { + struct { + u32 class; + u32 (*doorbell_handle)(struct nvkm_chan *); + } chan; + } fifo; + + struct { + u32 class; + u32 (*grce_mask)(struct nvkm_device *); + } ce; + + struct { + struct { + u32 i2m; + u32 twod; + u32 threed; + u32 compute; + } class; + } gr; + + struct { + u32 class; + } nvdec; + + struct { + u32 class; + } nvenc; + + struct { + u32 class; + } nvjpg; + + struct { + u32 class; + } ofa; +}; + +extern const struct nvkm_rm_gpu tu1xx_gpu; +extern const struct nvkm_rm_gpu ga100_gpu; +extern const struct nvkm_rm_gpu ga1xx_gpu; +extern const struct nvkm_rm_gpu ad10x_gpu; +extern const struct nvkm_rm_gpu gh100_gpu; +extern const struct nvkm_rm_gpu gb10x_gpu; +extern const struct nvkm_rm_gpu gb20x_gpu; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c new file mode 100644 index 000000000000..f40b8fcc2bcb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gr.h" + +#include <engine/fifo.h> +#include <engine/gr/priv.h> + +static int +nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); + + return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject); +} + +static int +nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend) +{ + struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + + if (rm->api->gr->scrubber.fini) + rm->api->gr->scrubber.fini(gr); + + return 0; +} + +static int +nvkm_rm_gr_init(struct nvkm_gr *base) +{ + struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + int ret; + + if (rm->api->gr->scrubber.init) { + ret = rm->api->gr->scrubber.init(gr); + if (ret) + return ret; + } + + return 0; +} + +int +nvkm_rm_gr_new(struct nvkm_rm *rm) +{ + const u32 classes[] = { + rm->gpu->gr.class.i2m, + rm->gpu->gr.class.twod, + rm->gpu->gr.class.threed, + rm->gpu->gr.class.compute, + }; + struct nvkm_gr_func *func; + struct r535_gr *gr; + + func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL); + if (!func) + return -ENOMEM; + + func->dtor = r535_gr_dtor; + func->oneinit = r535_gr_oneinit; + func->init = nvkm_rm_gr_init; + func->fini = nvkm_rm_gr_fini; + func->units = r535_gr_units; + func->chan_new = r535_gr_chan_new; + + for (int i = 0; i < ARRAY_SIZE(classes); i++) { + func->sclass[i].oclass = classes[i]; + func->sclass[i].minver = -1; + func->sclass[i].maxver = 0; + func->sclass[i].ctor = nvkm_rm_gr_obj_ctor; + } + + gr = kzalloc(sizeof(*gr), GFP_KERNEL); + if (!gr) { + kfree(func); + return -ENOMEM; + } + + nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base); + gr->scrubber.chid = -1; + rm->device->gr = &gr->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h new file mode 100644 index 000000000000..24980f23aab9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_RM_GR_H__ +#define __NVKM_RM_GR_H__ +#include "engine.h" + +#include <core/object.h> +#include <engine/gr.h> + +#define R515_GR_MAX_CTXBUFS 9 + +struct r535_gr_chan { + struct nvkm_object object; + struct r535_gr *gr; + + struct nvkm_vmm *vmm; + struct nvkm_chan *chan; + + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr { + struct nvkm_gr base; + + struct { + u16 bufferId; + u32 size; + u8 page; + u8 align; + bool global; + bool init; + bool ro; + } ctxbuf[R515_GR_MAX_CTXBUFS]; + int ctxbuf_nr; + + struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; + + struct { + int chid; + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + struct nvkm_gsp_object chan; + struct nvkm_gsp_object threed; + struct { + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; + } ctxbuf; + bool enabled; + } scrubber; +}; + +struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; +void r535_gr_get_ctxbuf_info(struct r535_gr *, int i, + struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h new file mode 100644 index 000000000000..3bdb5ad320d7 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_HANDLES_H__ +#define __NVKM_RM_HANDLES_H__ + +/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */ + +#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id)) +#define NVKM_RM_CLIENT_MASK 0x0000ffff +#define NVKM_RM_DEVICE 0xde1d0000 +#define NVKM_RM_SUBDEVICE 0x5d1d0000 +#define NVKM_RM_DISP 0x00730000 +#define NVKM_RM_VASPACE 0x90f10000 +#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid)) +#define NVKM_RM_THREED 0x97000000 +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c new file mode 100644 index 000000000000..d9fbfc377864 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "engine.h" +#include <engine/nvdec.h> + +static void * +nvkm_rm_nvdec_dtor(struct nvkm_engine *engine) +{ + return container_of(engine, struct nvkm_nvdec, engine); +} + +int +nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst) +{ + struct nvkm_nvdec *nvdec; + int ret; + + nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL); + if (!nvdec) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst, + &rm->gpu->nvdec.class, 1, &nvdec->engine); + if (ret) { + kfree(nvdec); + return ret; + } + + rm->device->nvdec[inst] = nvdec; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c new file mode 100644 index 000000000000..6dfa7b789e07 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "engine.h" +#include <engine/nvenc.h> + +static void * +nvkm_rm_nvenc_dtor(struct nvkm_engine *engine) +{ + return container_of(engine, struct nvkm_nvenc, engine); +} + +int +nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst) +{ + struct nvkm_nvenc *nvenc; + int ret; + + nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL); + if (!nvenc) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst, + &rm->gpu->nvenc.class, 1, &nvenc->engine); + if (ret) { + kfree(nvenc); + return ret; + } + + rm->device->nvenc[inst] = nvenc; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild new file mode 100644 index 000000000000..a5f6b2abfd33 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ +nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o +nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o +nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o +nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/client.o +nvkm-y += nvkm/subdev/gsp/rm/r535/device.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o +nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o +nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o +nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c new file mode 100644 index 000000000000..46e3a29f2ad7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c @@ -0,0 +1,112 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <rm/rpc.h> + +#include "nvrm/alloc.h" +#include "nvrm/rpcfn.h" + +static int +r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_free_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", + client->object.handle, object->handle); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->params.hRoot = client->object.handle; + rpc->params.hObjectParent = 0; + rpc->params.hObjectOld = object->handle; + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); +} + +static void +r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); + + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static void * +r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); + struct nvkm_gsp *gsp = object->client->gsp; + void *ret = NULL; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc)); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + if (rpc->status) { + ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); + if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY) + nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); + } + + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, + u32 params_size) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_alloc_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n", + client->object.handle, object->parent->handle, + object->handle); + + nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass, + params_size); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, + sizeof(*rpc) + params_size); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hParent = object->parent->handle; + rpc->hObject = object->handle; + rpc->hClass = oclass; + rpc->status = 0; + rpc->paramsSize = params_size; + return rpc->params; +} + +const struct nvkm_rm_api_alloc +r535_alloc = { + .get = r535_gsp_rpc_rm_alloc_get, + .push = r535_gsp_rpc_rm_alloc_push, + .done = r535_gsp_rpc_rm_alloc_done, + .free = r535_gsp_rpc_rm_free, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c index 3a30bea30e36..d06bf95b9a4a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -19,7 +19,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "gf100.h" +#include <subdev/bar/gf100.h> #include <core/mm.h> #include <subdev/fb.h> @@ -27,14 +27,20 @@ #include <subdev/instmem.h> #include <subdev/mmu/vmm.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> -#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> -#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h> +#include "nvrm/bar.h" +#include "nvrm/rpcfn.h" static void r535_bar_flush(struct nvkm_bar *bar) { + /* Use NV_PFLUSH in resume path - needed on R570 to flush writes before + * BAR2 page tables have been restored. 
+ */ + if (unlikely(!bar->bar2)) { + g84_bar_flush(bar); + return; + } + ioread32_native(bar->flushBAR2); } @@ -44,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base) } static int -r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) +r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe) { rpc_update_bar_pde_v15_00 *rpc; @@ -53,21 +59,22 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) return -EIO; rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; - rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ - rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! + rpc->info.entryValue = pdbe; + rpc->info.entryLevelShift = page_shift; - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } static void r535_bar_bar2_fini(struct nvkm_bar *bar) { + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; struct nvkm_gsp *gsp = bar->subdev.device->gsp; bar->flushBAR2 = bar->flushBAR2PhysMode; nvkm_done(bar->flushFBZero); - WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0)); } static void @@ -76,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar) struct nvkm_device *device = bar->subdev.device; struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; struct nvkm_gsp *gsp = device->gsp; - - WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); + struct nvkm_memory *pdb = vmm->pd->pt[0]->memory; + u32 pdb_offset = vmm->pd->pt[0]->base; + u32 pdbe_lo, pdbe_hi; + u64 pdbe; + + nvkm_kmap(pdb); + pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0); + pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4); + pdbe = ((u64)pdbe_hi << 32) | pdbe_lo; + nvkm_done(pdb); + + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe)); vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; if (!bar->flushFBZero) { @@ -174,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, } *pbar = bar; - bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); + bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE); if (!bar->flushBAR2PhysMode) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c new file mode 100644 index 000000000000..2d1ce9db2dcf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c @@ -0,0 +1,46 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <rm/engine.h> + +#include "nvrm/ce.h" +#include "nvrm/engine.h" + +static int +r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *ce) +{ + NVC0B5_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->version = 1; + args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst; + + return nvkm_gsp_rm_alloc_wr(ce, args); +} + +const struct nvkm_rm_api_engine +r535_ce = { + .alloc = r535_ce_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c index 932934227b9c..ec71f683e609 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c @@ -19,26 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "priv.h" +#include <rm/rm.h> -#include <subdev/gsp.h> +#include "nvrm/client.h" -#include <nvif/class.h> +static int +r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle) +{ + NV0000_ALLOC_PARAMETERS *args; -static const struct nvkm_engine_func -ga100_nvdec = { - .sclass = { - { -1, -1, NVC6B0_VIDEO_DECODER }, - {} - } -}; + args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) + return PTR_ERR(args); -int -ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvdec **pnvdec) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec); + args->hClient = client->object.handle; + args->processID = ~0; - return -ENODEV; + return nvkm_gsp_rm_alloc_wr(&client->object, args); } + +const struct nvkm_rm_api_client +r535_client = { + .ctor = r535_gsp_client_ctor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c new file mode 100644 index 000000000000..70b9ee911c5e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c @@ -0,0 +1,93 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <rm/rpc.h> + +#include "nvrm/ctrl.h" +#include "nvrm/rpcfn.h" + +static void +r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc); + + if (!params) + return; + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static int +r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc) +{ + rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc); + struct nvkm_gsp *gsp = object->client->gsp; + int ret = 0; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc); + if (IS_ERR_OR_NULL(rpc)) { + *params = NULL; + return PTR_ERR(rpc); + } + + if (rpc->status) { + ret = r535_rpc_status_to_errno(rpc->status); + if (ret != -EAGAIN && ret != -EBUSY) + nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", + object->client->object.handle, object->handle, rpc->cmd, rpc->status); + } + + if (repc) + *params = rpc->params; + else + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_control_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n", + client->object.handle, object->handle, cmd, params_size); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, + sizeof(*rpc) + params_size); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hObject = object->handle; + rpc->cmd = cmd; + rpc->status = 0; + rpc->paramsSize = params_size; + return rpc->params; +} + +const struct nvkm_rm_api_ctrl +r535_ctrl = { + .get = r535_gsp_rpc_rm_ctrl_get, + .push = r535_gsp_rpc_rm_ctrl_push, + .done = r535_gsp_rpc_rm_ctrl_done, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c new file mode 100644 index 000000000000..f830e12a8f6e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c @@ -0,0 +1,148 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <rm/rm.h> + +#include "nvrm/device.h" +#include "nvrm/event.h" + +static void +r535_gsp_event_dtor(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + + mutex_lock(&gsp->client_id.mutex); + if (event->func) { + list_del(&event->head); + event->func = NULL; + } + mutex_unlock(&gsp->client_id.mutex); + + nvkm_gsp_rm_free(&event->object); + event->device = NULL; +} + +static int +r535_gsp_device_event_get(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->event = event->id; + ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); +} + +static int +r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, + nvkm_gsp_event_func func, struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + NV0005_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, + NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), + &event->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hParentClient = client->object.handle; + args->hSrcResource = 0; + args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + args->notifyIndex = NV01_EVENT_CLIENT_RM | id; + args->data = NULL; + + ret = nvkm_gsp_rm_alloc_wr(&event->object, args); + if (ret) + return ret; + + event->device = device; + event->id = id; + + ret = r535_gsp_device_event_get(event); + if (ret) { + nvkm_gsp_event_dtor(event); + return ret; + } + + mutex_lock(&gsp->client_id.mutex); + event->func = func; + list_add(&event->head, &client->events); + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +static void +r535_gsp_device_dtor(struct nvkm_gsp_device *device) +{ + nvkm_gsp_rm_free(&device->subdevice); + nvkm_gsp_rm_free(&device->object); +} + +static int +r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) +{ + NV2080_ALLOC_PARAMETERS *args; + + return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0, + sizeof(*args), &device->subdevice); +} + +static int +r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) +{ + NV0080_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args), + &device->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClientShare = client->object.handle; + + ret = nvkm_gsp_rm_alloc_wr(&device->object, args); + if (ret) + return ret; + + ret = r535_gsp_subdevice_ctor(device); + if (ret) + nvkm_gsp_rm_free(&device->object); + + return ret; +} + +const struct nvkm_rm_api_device +r535_device = { + .ctor = r535_gsp_device_ctor, + .dtor = r535_gsp_device_dtor, + .event.ctor = r535_gsp_device_event_ctor, + .event.dtor = r535_gsp_event_dtor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 99110ab2f44d..7e9e2d3564da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -19,13 +19,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "priv.h" -#include "chan.h" -#include "conn.h" -#include "dp.h" -#include "head.h" -#include "ior.h" -#include "outp.h" +#include <engine/disp/priv.h> +#include <engine/disp/chan.h> +#include <engine/disp/conn.h> +#include <engine/disp/dp.h> +#include <engine/disp/head.h> +#include <engine/disp/ior.h> +#include <engine/disp/outp.h> #include <core/ramht.h> #include <subdev/bios.h> @@ -34,19 +34,11 @@ #include <subdev/mmu.h> #include <subdev/vfn.h> +#include <rm/gpu.h> + #include <nvhw/drf.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> -#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h> -#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h> +#include "nvrm/disp.h" #include <linux/acpi.h> @@ -78,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan) } static int -r535_chan_push(struct nvkm_disp_chan *chan) +r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory) { - struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp; + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, @@ -89,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan) if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - if (chan->memory) { - switch (nvkm_memory_target(chan->memory)) { + if (memory) { + switch (nvkm_memory_target(memory)) { case NVKM_MEM_TARGET_NCOH: ctrl->addressSpace = ADDR_SYSMEM; ctrl->cacheSnoop = 0; @@ -107,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan) return -EINVAL; } - ctrl->physicalAddr = nvkm_memory_addr(chan->memory); - ctrl->limit = nvkm_memory_size(chan->memory) - 1; + ctrl->physicalAddr = nvkm_memory_addr(memory); + ctrl->limit = nvkm_memory_size(memory) - 1; } - ctrl->hclass = chan->object.oclass; - ctrl->channelInstance = chan->head; - ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0; + ctrl->hclass = oclass; + ctrl->channelInstance = inst; + ctrl->valid = ((oclass & 0xff) != 0x7a) ? 
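/* The 0x7a low byte picks out the cursor channel classes (e.g. NVC37A),
 * which are PIO channels with no pushbuffer behind them, so 'valid' is
 * cleared for those; r535_curs_init() below passes a NULL memory to
 * set_pushbuf for the same reason.
 */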
1 : 0; return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); } @@ -121,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan) static int r535_curs_init(struct nvkm_disp_chan *chan) { + const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api; NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args; int ret; - ret = r535_chan_push(chan); + ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL); if (ret) return ret; @@ -172,25 +165,34 @@ r535_dmac_fini(struct nvkm_disp_chan *chan) } static int -r535_dmac_init(struct nvkm_disp_chan *chan) +r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *dmac) { NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; - int ret; - - ret = r535_chan_push(chan); - if (ret) - return ret; - args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, - (chan->object.oclass << 16) | chan->head, - chan->object.oclass, sizeof(*args), &chan->rm.object); + args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass, + sizeof(*args), dmac); if (IS_ERR(args)) return PTR_ERR(args); - args->channelInstance = chan->head; - args->offset = chan->suspend_put; + args->channelInstance = inst; + args->offset = put_offset; - return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + return nvkm_gsp_rm_alloc_wr(dmac, args); +} + +static int +r535_dmac_init(struct nvkm_disp_chan *chan) +{ + const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api; + int ret; + + ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory); + if (ret) + return ret; + + return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head, + chan->suspend_put, &chan->rm.object); } static int @@ -260,47 +262,47 @@ r535_core = { }; static int -r535_sor_bl_set(struct nvkm_ior *sor, int lvl) +r535_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval) { - struct nvkm_disp *disp = sor->disp; + u32 cmd = set ? 
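/* GET and SET share NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS,
 * which is what lets one helper serve both directions: 'set' only
 * selects the command, while *pval acts as an in/out brightness value,
 * pushed to RM on set and read back from the reply on get.
 */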
NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS : + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS; NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret; - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl)); if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->brightness = lvl; + ctrl->displayId = BIT(display_id); + ctrl->brightness = *pval; - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pval = ctrl->brightness; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; } static int -r535_sor_bl_get(struct nvkm_ior *sor) +r535_sor_bl_set(struct nvkm_ior *sor, int lvl) { struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; - int ret, lvl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api; - ctrl->displayId = BIT(sor->asy.outp->index); + return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl); +} - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } +static int +r535_sor_bl_get(struct nvkm_ior *sor) +{ + struct nvkm_disp *disp = sor->disp; + const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api; + int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl); - lvl = ctrl->brightness; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return lvl; + return (ret == 0) ? 
lvl : ret; } static const struct nvkm_ior_func_bl @@ -730,7 +732,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda) } static int -r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) +r535_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid) { NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; int ret; @@ -763,7 +765,9 @@ r535_outp_inherit(struct nvkm_outp *outp) int ret; list_for_each_entry(head, &disp->heads, head) { - ret = r535_disp_head_displayid(disp, head->id, &displayid); + const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api; + + ret = rmapi->disp->get_active(disp, head->id, &displayid); if (WARN_ON(ret)) return NULL; @@ -858,10 +862,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp) } static int -r535_outp_detect(struct nvkm_outp *outp) +r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id) { NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; int ret; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, @@ -870,23 +873,29 @@ r535_outp_detect(struct nvkm_outp *outp) return PTR_ERR(ctrl); ctrl->subDeviceInstance = 0; - ctrl->displayMask = BIT(outp->index); + ctrl->displayMask = BIT(display_id); ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } + if (ret == 0 && (ctrl->displayMask & BIT(display_id))) + ret = 1; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} - if (ctrl->displayMask & BIT(outp->index)) { +static int +r535_outp_detect(struct nvkm_outp *outp) +{ + const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api; + int ret; + + ret = rmapi->disp->get_connect_state(outp->disp, outp->index); + if (ret == 1) { ret = r535_outp_dfp_get_info(outp); if (ret == 0) ret = 1; - } else { - ret = 0; } - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return ret; } @@ -1029,15 +1038,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain) } static int -r535_dp_rates(struct nvkm_outp *outp) +r535_dp_set_indexed_link_rates(struct nvkm_outp *outp) { NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; struct nvkm_disp *disp = outp->disp; - if (outp->conn->info.type != DCB_CONNECTOR_eDP || - !outp->dp.rates || outp->dp.rate[0].dpcd < 0) - return 0; - if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) return -EINVAL; @@ -1054,6 +1059,18 @@ r535_dp_rates(struct nvkm_outp *outp) } static int +r535_dp_rates(struct nvkm_outp *outp) +{ + struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm; + + if (outp->conn->info.type != DCB_CONNECTOR_eDP || + !outp->dp.rates || outp->dp.rate[0].dpcd < 0) + return 0; + + return rm->api->disp->dp.set_indexed_link_rates(outp); +} + +static int r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) { struct nvkm_disp *disp = outp->disp; @@ -1151,6 +1168,49 @@ r535_dp = { }; static int +r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm) +{ + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + 
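/* The *plink_bw values are standard DPCD LINK_BW_SET codes, i.e. the
 * per-lane rate expressed in units of 270 Mb/s:
 *
 *   1.62 Gb/s / 0.27 = 0x06 (6)      5.40 Gb/s / 0.27 = 0x14 (20)
 *   2.70 Gb/s / 0.27 = 0x0a (10)     8.10 Gb/s / 0.27 = 0x1e (30)
 */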
*plink_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + *plink_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + *plink_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + *plink_bw = 0x1e; + break; + default: + *plink_bw = 0x00; + break; + } + + *pmst = ctrl->bIsMultistreamSupported; + *pwm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) { NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl; @@ -1194,6 +1254,7 @@ r535_tmds = { static int r535_outp_new(struct nvkm_disp *disp, u32 id) { + const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api; NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; enum nvkm_ior_proto proto; struct dcb_output dcbE = {}; @@ -1278,43 +1339,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id) if (ret) return ret; } else { - NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; bool mst, wm; - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->sorIndex = ~0; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm); + if (ret) return ret; - } - - switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: - dcbE.dpconf.link_bw = 0x06; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: - dcbE.dpconf.link_bw = 0x0a; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: - dcbE.dpconf.link_bw = 0x14; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: - dcbE.dpconf.link_bw = 0x1e; - break; - default: - dcbE.dpconf.link_bw = 0x00; - break; - } - - mst = ctrl->bIsMultistreamSupported; - wm = ctrl->bHasIncreasedWatermarkLimits; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); if (WARN_ON(!dcbE.dpconf.link_bw)) return -EINVAL; @@ -1441,11 +1470,47 @@ r535_disp_init(struct nvkm_disp *disp) } static int +r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask) +{ + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->displayMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r535_disp_get_static_info(struct nvkm_disp *disp) +{ + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int r535_disp_oneinit(struct nvkm_disp *disp) { struct nvkm_device *device = disp->engine.subdev.device; struct nvkm_gsp *gsp = device->gsp; + const struct nvkm_rm_api *rmapi = gsp->rm->api; NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; + unsigned long mask; int ret, i; /* RAMIN. 
*/ @@ -1476,24 +1541,14 @@ r535_disp_oneinit(struct nvkm_disp *disp) if (ret) return ret; - ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0, + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0, &disp->rm.objcom); if (ret) return ret; - { - NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - disp->wndw.mask = ctrl->windowPresentMask; - disp->wndw.nr = fls(disp->wndw.mask); - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - } + ret = rmapi->disp->get_static_info(disp); + if (ret) + return ret; /* */ { @@ -1622,25 +1677,14 @@ r535_disp_oneinit(struct nvkm_disp *disp) return ret; } - /* */ - { - NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; - unsigned long mask; - int i; - - ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - mask = ctrl->displayMask; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + ret = rmapi->disp->get_supported(disp, &mask); + if (ret) + return ret; - for_each_set_bit(i, &mask, 32) { - ret = r535_outp_new(disp, i); - if (ret) - return ret; - } + for_each_set_bit(i, &mask, 32) { + ret = r535_outp_new(disp, i); + if (ret) + return ret; } ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event); @@ -1686,6 +1730,7 @@ int r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) { + const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu; struct nvkm_disp_func *rm; int ret; @@ -1701,20 +1746,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, rm->sor.new = r535_sor_new; rm->ramht_size = hw->ramht_size; - rm->root = hw->root; + rm->root.oclass = gpu->disp.class.root; - for (int i = 0; hw->user[i].ctor; i++) { - switch (hw->user[i].base.oclass & 0xff) { - case 0x73: rm->user[i] = hw->user[i]; break; - case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break; - case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break; - case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break; - case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break; - default: - WARN_ON(1); - continue; - } - } + rm->user[0].base.oclass = gpu->disp.class.caps; + rm->user[0].ctor = gv100_disp_caps_new; + + rm->user[1].base.oclass = gpu->disp.class.core; + rm->user[1].ctor = nvkm_disp_core_new; + rm->user[1].chan = &r535_core; + + rm->user[2].base.oclass = gpu->disp.class.wndw; + rm->user[2].ctor = nvkm_disp_wndw_new; + rm->user[2].chan = &r535_wndw; + + rm->user[3].base.oclass = gpu->disp.class.wimm; + rm->user[3].ctor = nvkm_disp_wndw_new; + rm->user[3].chan = &r535_wimm; + + rm->user[4].base.oclass = gpu->disp.class.curs; + rm->user[4].ctor = nvkm_disp_chan_new; + rm->user[4].chan = &r535_curs; ret = nvkm_disp_new_(rm, device, type, inst, pdisp); if (ret) @@ -1723,3 +1774,20 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, mutex_init(&(*pdisp)->super.mutex); //XXX return ret; } + +const struct nvkm_rm_api_disp +r535_disp = { + .get_static_info = r535_disp_get_static_info, + .get_supported = r535_disp_get_supported, + .get_connect_state = r535_disp_get_connect_state, + .get_active = r535_disp_get_active, + .bl_ctrl = 
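/* Every entry in this table is reached through gsp->rm->api rather than
 * called directly, e.g. (a representative caller from earlier in this
 * file):
 *
 *   const struct nvkm_rm_api *rmapi =
 *       disp->rm.objcom.client->gsp->rm->api;
 *   ret = rmapi->disp->get_supported(disp, &mask);
 *
 * which presumably lets a different GSP-RM version plug in its own
 * implementation of individual hooks while reusing the rest of this
 * file unchanged.
 */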
r535_bl_ctrl, + .dp = { + .get_caps = r535_dp_get_caps, + .set_indexed_link_rates = r535_dp_set_indexed_link_rates, + }, + .chan = { + .set_pushbuf = r535_disp_chan_set_pushbuf, + .dmac_alloc = r535_dmac_alloc, + } +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index 5f3c9c02a4c0..150e22fde2ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -19,19 +19,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "priv.h" - +#include <subdev/instmem/priv.h> #include <subdev/gsp.h> #include <nvhw/drf.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> -#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h> -#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> -#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> +#include "nvrm/fbsr.h" +#include "nvrm/rpcfn.h" struct fbsr_item { const char *type; @@ -54,9 +48,9 @@ struct fbsr { u64 sys_offset; }; -static int -fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, - u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) +int +r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, + u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) { struct nvkm_gsp_client *client = device->object.client; struct nvkm_gsp *gsp = client->gsp; @@ -105,7 +99,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; } - ret = nvkm_gsp_rpc_wr(gsp, rpc, true); + ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL); if (ret) return ret; @@ -123,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) struct nvkm_gsp_object memlist; int ret; - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, - item->addr, item->size, NULL, &memlist); + ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, + item->addr, item->size, NULL, &memlist); if (ret) return ret; @@ -161,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) struct nvkm_gsp_object memlist; int ret; - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, - 0, fbsr->size, sgt, &memlist); + ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, + 0, fbsr->size, sgt, &memlist); if (ret) return ret; @@ -206,22 +200,19 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); } -static void -r535_instmem_resume(struct nvkm_instmem *imem) +void +r535_fbsr_resume(struct nvkm_gsp *gsp) { /* RM has restored VRAM contents already, so just need to free the sysmem buffer. 
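(That buffer is the scatterlist r535_fbsr_suspend() below hands to
r535_fbsr_memlist() as the backup destination, so resume only has to
release the pages.)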
*/ - if (imem->rm.fbsr_valid) { - nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); - imem->rm.fbsr_valid = false; - } + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr); } static int -r535_instmem_suspend(struct nvkm_instmem *imem) +r535_fbsr_suspend(struct nvkm_gsp *gsp) { - struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; - struct nvkm_gsp *gsp = device->gsp; + struct nvkm_instmem *imem = device->imem; struct nvkm_instobj *iobj; struct fbsr fbsr = {}; struct fbsr_item *item, *temp; @@ -262,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem) fbsr.size += gsp->fb.bios.vga_workspace.size; nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); - ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); + ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr); if (ret) goto done; @@ -271,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem) if (ret) goto done_sgt; - ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); + ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size); if (WARN_ON(ret)) goto done_sgt; @@ -282,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem) goto done_sgt; } - imem->rm.fbsr_valid = true; - /* Cleanup everything except the sysmem backup, which will be removed after resume. */ done_sgt: if (ret) /* ... unless we failed already. */ - nvkm_gsp_sg_free(device, &imem->rm.fbsr); + nvkm_gsp_sg_free(device, &gsp->sr.fbsr); done: list_for_each_entry_safe(item, temp, &fbsr.items, head) { list_del(&item->head); @@ -299,6 +288,12 @@ done: return ret; } +const struct nvkm_rm_api_fbsr +r535_fbsr = { + .suspend = r535_fbsr_suspend, + .resume = r535_fbsr_resume, +}; + static void * r535_instmem_dtor(struct nvkm_instmem *imem) { @@ -319,11 +314,10 @@ r535_instmem_new(const struct nvkm_instmem_func *hw, rm->dtor = r535_instmem_dtor; rm->fini = hw->fini; - rm->suspend = r535_instmem_suspend; - rm->resume = r535_instmem_resume; rm->memory_new = hw->memory_new; rm->memory_wrap = hw->memory_wrap; rm->zero = false; + rm->set_bar0_window_addr = hw->set_bar0_window_addr; ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 3454c7d29502..1ac5628c5140 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -19,11 +19,11 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "priv.h" -#include "cgrp.h" -#include "chan.h" -#include "chid.h" -#include "runl.h" +#include <engine/fifo/priv.h> +#include <engine/fifo/cgrp.h> +#include <engine/fifo/chan.h> +#include <engine/fifo/chid.h> +#include <engine/fifo/runl.h> #include <core/gpuobj.h> #include <subdev/gsp.h> @@ -31,24 +31,19 @@ #include <subdev/vfn.h> #include <engine/gr.h> +#include <rm/engine.h> + #include <nvhw/drf.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h> -#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h> -#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h> +#include "nvrm/fifo.h" +#include "nvrm/engine.h" static u32 r535_chan_doorbell_handle(struct nvkm_chan *chan) { - return (chan->cgrp->runl->id << 16) | chan->id; + struct nvkm_gsp *gsp = chan->rm.object.client->gsp; + + return gsp->rm->gpu->fifo.chan.doorbell_handle(chan); } static void @@ -77,50 +72,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan) #define CHID_PER_USERD 8 static int -r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq, + bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length, + struct nvkm_gsp_object *chan) { - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - struct nvkm_engn *engn; - struct nvkm_device *device = fifo->engine.subdev.device; + struct nvkm_gsp *gsp = device->object.client->gsp; + struct nvkm_fifo *fifo = gsp->subdev.device->fifo; + const int userd_p = chid / CHID_PER_USERD; + const int userd_i = chid % CHID_PER_USERD; NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - const int userd_p = chan->id / CHID_PER_USERD; - const int userd_i = chan->id % CHID_PER_USERD; - u32 eT = ~0; - int ret; - - if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { - ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); - if (ret) - return ret; - } - - nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { - eT = engn->id; - break; - } - - if (WARN_ON(eT == ~0)) - return -EINVAL; - chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, - fifo->rm.mthdbuf_size, - &chan->rm.mthdbuf.addr, GFP_KERNEL); - if (!chan->rm.mthdbuf.ptr) - return -ENOMEM; - - args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id, - fifo->func->chan.user.oclass, sizeof(*args), - &chan->rm.object); + args = nvkm_gsp_rm_alloc_get(&device->object, handle, + fifo->func->chan.user.oclass, sizeof(*args), chan); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); - args->gpFifoOffset = offset; - args->gpFifoEntries = length / 8; + args->gpFifoOffset = gpfifo_offset; + args->gpFifoEntries = gpfifo_length / 8; args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, 
FALSE); - args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq); if (!priv) args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); else @@ -143,25 +117,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); - args->hVASpace = chan->vmm->rm.object.handle; - args->engineType = eT; + args->hVASpace = vmm->rm.object.handle; + args->engineType = nv2080_engine_type; - args->instanceMem.base = chan->inst->addr; - args->instanceMem.size = chan->inst->size; + args->instanceMem.base = inst_addr; + args->instanceMem.size = fifo->func->chan.func->inst->size; args->instanceMem.addressSpace = 2; args->instanceMem.cacheAttrib = 1; - args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; + args->userdMem.base = userd_addr; args->userdMem.size = fifo->func->chan.func->userd->size; args->userdMem.addressSpace = 2; args->userdMem.cacheAttrib = 1; - args->ramfcMem.base = chan->inst->addr + 0; + args->ramfcMem.base = inst_addr; args->ramfcMem.size = 0x200; args->ramfcMem.addressSpace = 2; args->ramfcMem.cacheAttrib = 1; - args->mthdbufMem.base = chan->rm.mthdbuf.addr; + args->mthdbufMem.base = mthdbuf_addr; args->mthdbufMem.size = fifo->rm.mthdbuf_size; args->mthdbufMem.addressSpace = 1; args->mthdbufMem.cacheAttrib = 0; @@ -173,7 +147,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); - ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + return nvkm_gsp_rm_alloc_wr(chan, args); +} + +static int +r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + struct nvkm_engn *engn; + struct nvkm_device *device = fifo->engine.subdev.device; + const struct nvkm_rm_api *rmapi = device->gsp->rm->api; + u32 eT = ~0; + int ret; + + if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { + ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); + if (ret) + return ret; + } + + nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { + eT = engn->id; + break; + } + + if (WARN_ON(eT == ~0)) + return -EINVAL; + + chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, + fifo->rm.mthdbuf_size, + &chan->rm.mthdbuf.addr, GFP_KERNEL); + if (!chan->rm.mthdbuf.ptr) + return -ENOMEM; + + ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id), + eT, chan->runq, priv, chan->id, chan->inst->addr, + nvkm_memory_addr(chan->userd.mem) + chan->userd.base, + chan->rm.mthdbuf.addr, chan->vmm, offset, length, + &chan->rm.object); if (ret) return ret; @@ -215,123 +226,8 @@ r535_chan_ramfc = { .priv = true, }; -struct r535_chan_userd { - struct nvkm_memory *mem; - struct nvkm_memory *map; - int chid; - u32 used; - - struct list_head head; -} *userd; - -static void -r535_chan_id_put(struct nvkm_chan *chan) -{ - struct nvkm_runl *runl = chan->cgrp->runl; - struct nvkm_fifo *fifo = runl->fifo; - struct r535_chan_userd *userd; - - mutex_lock(&fifo->userd.mutex); - list_for_each_entry(userd, &fifo->userd.list, head) { - if (userd->map == chan->userd.mem) { - u32 chid = chan->userd.base / chan->func->userd->size; - - 
userd->used &= ~BIT(chid); - if (!userd->used) { - nvkm_memory_unref(&userd->map); - nvkm_memory_unref(&userd->mem); - nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); - list_del(&userd->head); - kfree(userd); - } - - break; - } - } - mutex_unlock(&fifo->userd.mutex); - -} - -static int -r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) -{ - const u32 userd_size = CHID_PER_USERD * chan->func->userd->size; - struct nvkm_runl *runl = chan->cgrp->runl; - struct nvkm_fifo *fifo = runl->fifo; - struct r535_chan_userd *userd; - u32 chid; - int ret; - - if (ouserd + chan->func->userd->size >= userd_size || - (ouserd & (chan->func->userd->size - 1))) { - RUNL_DEBUG(runl, "ouserd %llx", ouserd); - return -EINVAL; - } - - chid = div_u64(ouserd, chan->func->userd->size); - - list_for_each_entry(userd, &fifo->userd.list, head) { - if (userd->mem == muserd) { - if (userd->used & BIT(chid)) - return -EBUSY; - break; - } - } - - if (&userd->head == &fifo->userd.list) { - if (nvkm_memory_size(muserd) < userd_size) { - RUNL_DEBUG(runl, "userd too small"); - return -EINVAL; - } - - userd = kzalloc(sizeof(*userd), GFP_KERNEL); - if (!userd) - return -ENOMEM; - - userd->chid = nvkm_chid_get(runl->chid, chan); - if (userd->chid < 0) { - ret = userd->chid; - kfree(userd); - return ret; - } - - userd->mem = nvkm_memory_ref(muserd); - - ret = nvkm_memory_kmap(userd->mem, &userd->map); - if (ret) { - nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); - kfree(userd); - return ret; - } - - - list_add(&userd->head, &fifo->userd.list); - } - - userd->used |= BIT(chid); - - chan->userd.mem = nvkm_memory_ref(userd->map); - chan->userd.base = ouserd; - - return (userd->chid * CHID_PER_USERD) + chid; -} - -static int -r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) -{ - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - int ret; - - mutex_lock(&fifo->userd.mutex); - ret = r535_chan_id_get_locked(chan, muserd, ouserd); - mutex_unlock(&fifo->userd.mutex); - return ret; -} - static const struct nvkm_chan_func r535_chan = { - .id_get = r535_chan_id_get, - .id_put = r535_chan_id_put, .inst = &gf100_chan_inst, .userd = &gv100_chan_userd, .ramfc = &r535_chan_ramfc, @@ -340,10 +236,6 @@ r535_chan = { .doorbell_handle = r535_chan_doorbell_handle, }; -static const struct nvkm_cgrp_func -r535_cgrp = { -}; - static int r535_engn_nonstall(struct nvkm_engn *engn) { @@ -356,7 +248,7 @@ r535_engn_nonstall(struct nvkm_engn *engn) } static const struct nvkm_engn_func -r535_ce = { +r535_engn_ce = { .nonstall = r535_engn_nonstall, }; @@ -376,7 +268,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c } static const struct nvkm_engn_func -r535_gr = { +r535_engn_gr = { .nonstall = r535_engn_nonstall, .ctor2 = r535_gr_ctor, }; @@ -449,57 +341,86 @@ r535_runl = { .allow = r535_runl_allow, }; -static int -r535_fifo_2080_type(enum nvkm_subdev_type type, int inst) +void +r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid) { - switch (type) { - case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0; - case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst; - case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2; - case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst; - case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst; - case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst; - case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA; - case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW; - default: - 
break; + struct nvkm_chan *chan; + unsigned long flags; + + chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); + if (!chan) { + nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid); + return; } - WARN_ON(1); - return -EINVAL; + nvkm_chan_error(chan, false); + nvkm_chan_put(&chan, flags); } static int -r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype) +r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) { + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n", + msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, + msg->partitionAttributionId); + + r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid); + return 0; +} + +static int +r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080) +{ +#define RM_ENGINE_TYPE(RM,NVKM,INST) \ + RM_ENGINE_TYPE_##RM: \ + *ptype = NVKM_ENGINE_##NVKM; \ + *p2080 = NV2080_ENGINE_TYPE_##RM; \ + return INST + switch (rm) { - case RM_ENGINE_TYPE_GR0: - *ptype = NVKM_ENGINE_GR; - return 0; - case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9: - *ptype = NVKM_ENGINE_CE; - return rm - RM_ENGINE_TYPE_COPY0; - case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7: - *ptype = NVKM_ENGINE_NVDEC; - return rm - RM_ENGINE_TYPE_NVDEC0; - case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2: - *ptype = NVKM_ENGINE_NVENC; - return rm - RM_ENGINE_TYPE_NVENC0; - case RM_ENGINE_TYPE_SW: - *ptype = NVKM_ENGINE_SW; - return 0; - case RM_ENGINE_TYPE_SEC2: - *ptype = NVKM_ENGINE_SEC2; - return 0; - case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7: - *ptype = NVKM_ENGINE_NVJPG; - return rm - RM_ENGINE_TYPE_NVJPEG0; - case RM_ENGINE_TYPE_OFA: - *ptype = NVKM_ENGINE_OFA; - return 0; + case RM_ENGINE_TYPE( GR0, GR, 0); + case RM_ENGINE_TYPE( COPY0, CE, 0); + case RM_ENGINE_TYPE( COPY1, CE, 1); + case RM_ENGINE_TYPE( COPY2, CE, 2); + case RM_ENGINE_TYPE( COPY3, CE, 3); + case RM_ENGINE_TYPE( COPY4, CE, 4); + case RM_ENGINE_TYPE( COPY5, CE, 5); + case RM_ENGINE_TYPE( COPY6, CE, 6); + case RM_ENGINE_TYPE( COPY7, CE, 7); + case RM_ENGINE_TYPE( COPY8, CE, 8); + case RM_ENGINE_TYPE( COPY9, CE, 9); + case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0); + case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1); + case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2); + case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3); + case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4); + case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5); + case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6); + case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7); + case RM_ENGINE_TYPE( NVENC0, NVENC, 0); + case RM_ENGINE_TYPE( NVENC1, NVENC, 1); + case RM_ENGINE_TYPE( NVENC2, NVENC, 2); + case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0); + case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1); + case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2); + case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3); + case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4); + case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5); + case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6); + case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7); + case RM_ENGINE_TYPE( SW, SW, 0); + case RM_ENGINE_TYPE( SEC2, SEC2, 0); + case RM_ENGINE_TYPE( OFA, OFA, 0); default: return -EINVAL; } +#undef RM_ENGINE_TYPE } static int @@ -536,16 +457,19 @@ static int r535_fifo_runl_ctor(struct nvkm_fifo *fifo) { struct nvkm_subdev *subdev = &fifo->engine.subdev; - struct nvkm_gsp *gsp = subdev->device->gsp; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; struct nvkm_runl *runl; 
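/* The RM_ENGINE_TYPE() case macro above collapses each translation into
 * one table line: RM engine enum in, nvkm subdev type and NV2080 engine
 * type out, instance number as the return value. Below, rsvd_chids
 * carves the first channel IDs out of the usable range, so nvkm
 * allocates from [first, chids); the r535_fifo definition at the end of
 * this file leaves .rsvd_chids unset, meaning the full range stays
 * available on r535.
 */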
struct nvkm_engn *engn; - u32 cgids = 2048; u32 chids = 2048; + u32 first = rm->api->fifo->rsvd_chids; + u32 count = chids - first; int ret; NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; - if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) || - (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid))) + if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) || + (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid))) return ret; ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, @@ -576,25 +500,43 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) if (!runl) continue; - inst = r535_fifo_engn_type(rmid, &type); + inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080); if (inst < 0) { nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid); nvkm_runl_del(runl); continue; } - nv2080 = r535_fifo_2080_type(type, inst); - if (nv2080 < 0) { + /* Skip SW engine - there's currently no support for NV SW classes. */ + if (type == NVKM_ENGINE_SW) + continue; + + /* Skip lone GRCEs (ones not paired with GR on a runlist), as they + * don't appear to function as async copy engines. + */ + if (type == NVKM_ENGINE_CE && + rm->gpu->ce.grce_mask && + (rm->gpu->ce.grce_mask(device) & BIT(inst)) && + !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) { + RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst); + nvkm_runl_del(runl); + continue; + } + + ret = nvkm_rm_engine_new(gsp->rm, type, inst); + if (ret) { nvkm_runl_del(runl); continue; } + engn = NULL; + switch (type) { case NVKM_ENGINE_CE: - engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst); + engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst); break; case NVKM_ENGINE_GR: - engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); + engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst); break; case NVKM_ENGINE_NVDEC: case NVKM_ENGINE_NVENC: @@ -633,7 +575,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); } - return r535_fifo_ectx_size(fifo); + return rm->api->fifo->ectx_size(fifo); } static void @@ -646,6 +588,7 @@ int r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { + const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu; struct nvkm_fifo_func *rm; if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) @@ -654,12 +597,20 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, rm->dtor = r535_fifo_dtor; rm->runl_ctor = r535_fifo_runl_ctor; rm->runl = &r535_runl; - rm->cgrp = hw->cgrp; - rm->cgrp.func = &r535_cgrp; - rm->chan = hw->chan; + rm->chan.user.oclass = gpu->fifo.chan.class; rm->chan.func = &r535_chan; rm->nonstall = &ga100_fifo_nonstall; rm->nonstall_ctor = ga100_fifo_nonstall_ctor; return nvkm_fifo_new_(rm, device, type, inst, pfifo); } + +const struct nvkm_rm_api_fifo +r535_fifo = { + .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type, + .ectx_size = r535_fifo_ectx_size, + .rc_triggered = r535_fifo_rc_triggered, + .chan = { + .alloc = r535_chan_alloc, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c new file mode 100644 index 000000000000..ddb57d5e73d6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -0,0 +1,356 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <rm/gr.h> + +#include <core/memory.h> +#include <subdev/gsp.h> +#include <subdev/mmu/vmm.h> +#include <engine/fifo/priv.h> +#include <engine/gr/priv.h> + +#include <nvif/if900d.h> + +#include <nvhw/drf.h> + +#include "nvrm/gr.h" +#include "nvrm/vmm.h" + +#define r535_gr(p) container_of((p), struct r535_gr, base) + +static void * +r535_gr_chan_dtor(struct nvkm_object *object) +{ + struct r535_gr_chan *grc = container_of(object, typeof(*grc), object); + struct r535_gr *gr = grc->gr; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + nvkm_vmm_put(grc->vmm, &grc->vma[i]); + nvkm_memory_unref(&grc->mem[i]); + } + + nvkm_vmm_unref(&grc->vmm); + return grc; +} + +static const struct nvkm_object_func +r535_gr_chan = { + .dtor = r535_gr_chan_dtor, +}; + +int +r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, + struct nvkm_memory **pmem, struct nvkm_vma **pvma, + struct nvkm_gsp_object *chan) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->engineType = 1; + ctrl->hChanClient = vmm->rm.client.object.handle; + ctrl->hObject = chan->handle; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry = + &ctrl->promoteEntry[ctrl->entryCount]; + const bool alloc = golden || !gr->ctxbuf[i].global; + int ret; + + entry->bufferId = gr->ctxbuf[i].bufferId; + entry->bInitialize = gr->ctxbuf[i].init && alloc; + + if (alloc) { + ret = nvkm_memory_new(device, gr->ctxbuf[i].init ? 
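/* Context buffers nvkm has to initialize are placed in
 * NVKM_MEM_TARGET_INST so their contents are preserved, while the
 * others use NVKM_MEM_TARGET_INST_SR_LOST, which (as the name suggests)
 * may lose its contents across suspend/resume and therefore needs no
 * fbsr backup.
 */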
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST, + gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page, + gr->ctxbuf[i].init, &pmem[i]); + if (WARN_ON(ret)) + return ret; + + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) + entry->bNonmapped = 1; + } else { + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP) + continue; + + pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]); + } + + if (!entry->bNonmapped) { + struct gf100_vmm_map_v0 args = { + .priv = 1, + .ro = gr->ctxbuf[i].ro, + }; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align, + nvkm_memory_size(pmem[i]), &pvma[i]); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args)); + if (ret) + return ret; + + entry->gpuVirtAddr = pvma[i]->addr; + } + + if (entry->bInitialize) { + entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]); + entry->size = gr->ctxbuf[i].size; + entry->physAttr = 4; + } + + nvkm_debug(subdev, + "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n", + entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size, + entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped); + + ctrl->entryCount++; + } + + return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl); +} + +int +r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass, + struct nvkm_object **pobject) +{ + struct r535_gr *gr = r535_gr(base); + struct r535_gr_chan *grc; + int ret; + + if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object); + grc->gr = gr; + grc->vmm = nvkm_vmm_ref(chan->vmm); + grc->chan = chan; + *pobject = &grc->object; + + ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object); + if (ret) + return ret; + + return 0; +} + +u64 +r535_gr_units(struct nvkm_gr *gr) +{ + struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp; + + return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; +} + +void +r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i, + struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + static const struct { + u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ + u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ + bool global; + bool init; + bool ro; + } map[] = { +#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ + .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ + .global = (G), .init = (I), .ro = (R) } +#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) + /* global init ro */ + _A( GRAPHICS, MAIN, false, true, false), + _B( PATCH, false, true, false), + _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), + _B( PAGEPOOL, true, false, false), + _B( ATTRIBUTE_CB, true, false, false), + _B( RTV_CB_GLOBAL, true, false, false), + _B( FECS_EVENT, true, true, false), + _B( PRIV_ACCESS_MAP, true, true, true), +#undef _B +#undef _A + }; + u32 size = info->size; + u8 align, page; + int id; + + for (id = 0; id < ARRAY_SIZE(map); id++) { + if (map[id].id0 == i) + break; + } + + nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, + size, (id < ARRAY_SIZE(map)) ? 
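/* The "*" in this debug line marks buffer IDs nvkm knows how to handle.
 * For those, the code below rounds the MAIN buffer up for 64 per-subctx
 * headers, then picks the largest page size the buffer can fill (2 MiB,
 * 64 KiB, else 4 KiB) and aligns the attribute CB to its own size.
 */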
"*" : ""); + if (id >= ARRAY_SIZE(map)) + return; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) + size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ + + if (size >= 1 << 21) page = 21; + else if (size >= 1 << 16) page = 16; + else page = 12; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) + align = order_base_2(size); + else + align = page; + + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + return; + + gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; + gr->ctxbuf[gr->ctxbuf_nr].size = size; + gr->ctxbuf[gr->ctxbuf_nr].page = page; + gr->ctxbuf[gr->ctxbuf_nr].align = align; + gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; + gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; + gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; + gr->ctxbuf_nr++; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + return; + + gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; + gr->ctxbuf[gr->ctxbuf_nr].bufferId = + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; + gr->ctxbuf_nr++; + } +} + +static int +r535_gr_get_ctxbufs_info(struct r535_gr *gr) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) + return PTR_ERR(info); + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) + r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + return 0; +} + +int +r535_gr_oneinit(struct nvkm_gr *base) +{ + struct r535_gr *gr = container_of(base, typeof(*gr), base); + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; + struct { + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + struct nvkm_gsp_object chan; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; + } golden = {}; + struct nvkm_gsp_object threed; + int ret; + + /* Allocate a channel to use for golden context init. */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst); + if (ret) + goto done; + + ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm); + if (ret) + goto done; + + ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false); + if (ret) + goto done; + + ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0), + 1, 0, true, rm->api->fifo->rsvd_chids, + nvkm_memory_addr(golden.inst), + nvkm_memory_addr(golden.inst) + 0x1000, + nvkm_memory_addr(golden.inst) + 0x2000, + golden.vmm, 0, 0x1000, &golden.chan); + if (ret) + goto done; + + /* Fetch context buffer info from RM and allocate each of them here to use + * during golden context init (or later as a global context buffer). + * + * Also build the information that'll be used to create channel contexts. + */ + ret = rm->api->gr->get_ctxbufs_info(gr); + if (ret) + goto done; + + /* Promote golden context to RM. */ + ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan); + if (ret) + goto done; + + /* Allocate 3D class on channel to trigger golden context init in RM. 
*/ + ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed); + if (ret) + goto done; + + /* There's no need to keep the golden channel around, as RM caches the context. */ + nvkm_gsp_rm_free(&threed); +done: + nvkm_gsp_rm_free(&golden.chan); + for (int i = gr->ctxbuf_nr - 1; i >= 0; i--) + nvkm_vmm_put(golden.vmm, &golden.vma[i]); + nvkm_vmm_unref(&golden.vmm); + nvkm_memory_unref(&golden.inst); + return ret; + +} + +void * +r535_gr_dtor(struct nvkm_gr *base) +{ + struct r535_gr *gr = r535_gr(base); + + while (gr->ctxbuf_nr) + nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]); + + kfree(gr->base.func); + return gr; +} + +const struct nvkm_rm_api_gr +r535_gr = { + .get_ctxbufs_info = r535_gr_get_ctxbufs_info, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index db2602e88006..588cb4ab85cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -19,9 +19,12 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ +#include <rm/rpc.h> + #include "priv.h" #include <core/pci.h> +#include <subdev/pci/priv.h> #include <subdev/timer.h> #include <subdev/vfn.h> #include <engine/fifo/chan.h> @@ -30,29 +33,11 @@ #include <nvfw/fw.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> -#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h> -#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h> -#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h> -#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h> -#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h> -#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h> -#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h> -#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h> -#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h> -#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h> -#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> +#include "nvrm/gsp.h" +#include "nvrm/rpcfn.h" +#include "nvrm/msgfn.h" +#include "nvrm/event.h" +#include "nvrm/fifo.h" #include <linux/acpi.h> #include <linux/ctype.h> @@ -60,990 +45,6 @@ extern struct dentry *nouveau_debugfs_root; -#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE -#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16) - -/** - * DOC: GSP message queue element - * - * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h - * - * The GSP command queue and status queue are message queues for the - * communication between software and GSP. 
The software submits the GSP - * RPC via the GSP command queue, GSP writes the status of the submitted - * RPC in the status queue. - * - * A GSP message queue element consists of three parts: - * - * - message element header (struct r535_gsp_msg), which mostly maintains - * the metadata for queuing the element. - * - * - RPC message header (struct nvfw_gsp_rpc), which maintains the info - * of the RPC. E.g., the RPC function number. - * - * - The payload, where the RPC message stays. E.g. the params of a - * specific RPC function. Some RPC functions also have their headers - * in the payload. E.g. rm_alloc, rm_control. - * - * The memory layout of a GSP message element can be illustrated below:: - * - * +------------------------+ - * | Message Element Header | - * | (r535_gsp_msg) | - * | | - * | (r535_gsp_msg.data) | - * | | | - * |----------V-------------| - * | GSP RPC Header | - * | (nvfw_gsp_rpc) | - * | | - * | (nvfw_gsp_rpc.data) | - * | | | - * |----------V-------------| - * | Payload | - * | | - * | header(optional) | - * | params | - * +------------------------+ - * - * The max size of a message queue element is 16 pages (including the - * headers). When a GSP message to be sent is larger than 16 pages, the - * message should be split into multiple elements and sent accordingly. - * - * In the bunch of the split elements, the first element has the expected - * function number, while the rest of the elements are sent with the - * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD. - * - * GSP consumes the elements from the cmdq and always writes the result - * back to the msgq. The result is also formed as split elements. - * - * Terminology: - * - * - gsp_msg(msg): GSP message element (element header + GSP RPC header + - * payload) - * - gsp_rpc(rpc): GSP RPC (RPC header + payload) - * - gsp_rpc_buf: buffer for (GSP RPC header + payload) - * - gsp_rpc_len: size of (GSP RPC header + payload) - * - params_size: size of params in the payload - * - payload_size: size of (header if exists + params) in the payload - */ - -struct r535_gsp_msg { - u8 auth_tag_buffer[16]; - u8 aad_buffer[16]; - u32 checksum; - u32 sequence; - u32 elem_count; - u32 pad; - u8 data[]; -}; - -struct nvfw_gsp_rpc { - u32 header_version; - u32 signature; - u32 length; - u32 function; - u32 rpc_result; - u32 rpc_result_private; - u32 sequence; - union { - u32 spare; - u32 cpuRmGfid; - }; - u8 data[]; -}; - -#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) - -#define to_gsp_hdr(p, header) \ - container_of((void *)p, typeof(*header), data) - -#define to_payload_hdr(p, header) \ - container_of((void *)p, typeof(*header), params) - -static int -r535_rpc_status_to_errno(uint32_t rpc_status) -{ - switch (rpc_status) { - case 0x55: /* NV_ERR_NOT_READY */ - case 0x66: /* NV_ERR_TIMEOUT_RETRY */ - return -EBUSY; - case 0x51: /* NV_ERR_NO_MEMORY */ - return -ENOMEM; - default: - return -EINVAL; - } -} - -static int -r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime) -{ - u32 size, rptr = *gsp->msgq.rptr; - int used; - - size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len, - GSP_PAGE_SIZE); - if (WARN_ON(!size || size >= gsp->msgq.cnt)) - return -EINVAL; - - do { - u32 wptr = *gsp->msgq.wptr; - - used = wptr + gsp->msgq.cnt - rptr; - if (used >= gsp->msgq.cnt) - used -= gsp->msgq.cnt; - if (used >= size) - break; - - usleep_range(1, 2); - } while (--(*ptime)); - - if (WARN_ON(!*ptime)) - return -ETIMEDOUT; - - return used; -} - -static struct r535_gsp_msg * 
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
-	u32 rptr = *gsp->msgq.rptr;
-
-	/* Skip the first page, which is the message queue info */
-	return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
-			rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- *   Peek the first page of the element to determine the total size of the
- *   message before allocating the proper memory.
- *
- * - Allocate memory for the message.
- *   Once the total size of the message is determined from the GSP message
- *   queue element, the caller of r535_gsp_msgq_recv() allocates the
- *   required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- *   Copy the message into the allocated memory. Advance the read pointer.
- *   If the message is a large GSP message, r535_gsp_msgq_recv() calls
- *   r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
- *   until the complete message is received.
- *   r535_gsp_msgq_recv() assembles the payloads of continuation parts into
- *   the returned large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- *   The user is responsible for freeing the memory allocated for the GSP
- *   message pages after they have been processed.
- */
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
-	struct r535_gsp_msg *mqe;
-	int ret;
-
-	ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
-	mqe = r535_gsp_msgq_get_entry(gsp);
-
-	return mqe->data;
-}
-
-struct r535_gsp_msg_info {
-	int *retries;
-	u32 gsp_rpc_len;
-	void *gsp_rpc_buf;
-	bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
-			    struct r535_gsp_msg_info *info)
-{
-	u8 *buf = info->gsp_rpc_buf;
-	u32 rptr = *gsp->msgq.rptr;
-	struct r535_gsp_msg *mqe;
-	u32 size, expected, len;
-	int ret;
-
-	expected = info->gsp_rpc_len;
-
-	ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
-	mqe = r535_gsp_msgq_get_entry(gsp);
-
-	if (info->continuation) {
-		struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
-		if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
-			nvkm_error(&gsp->subdev,
-				   "Not a continuation of a large RPC\n");
-			r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
-			return ERR_PTR(-EIO);
-		}
-	}
-
-	size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
-	len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
-	len = min_t(u32, expected, len);
-
-	if (info->continuation)
-		memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
-		       len - sizeof(struct nvfw_gsp_rpc));
-	else
-		memcpy(buf, mqe->data, len);
-
-	expected -= len;
-
-	if (expected) {
-		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
-		memcpy(buf + len, mqe, expected);
-	}
-
-	rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
-	mb();
-	(*gsp->msgq.rptr) = rptr;
-	return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
-	struct r535_gsp_msg *mqe;
-	const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
-	struct nvfw_gsp_rpc *rpc;
-	struct r535_gsp_msg_info info = {0};
-	u32 expected = gsp_rpc_len;
-	void *buf;
-
-	mqe = r535_gsp_msgq_get_entry(gsp);
-	rpc = (struct
nvfw_gsp_rpc *)mqe->data; - - if (WARN_ON(rpc->length > max_rpc_size)) - return NULL; - - buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL); - if (!buf) - return ERR_PTR(-ENOMEM); - - info.gsp_rpc_buf = buf; - info.retries = retries; - info.gsp_rpc_len = rpc->length; - - buf = r535_gsp_msgq_recv_one_elem(gsp, &info); - if (IS_ERR(buf)) { - kvfree(info.gsp_rpc_buf); - info.gsp_rpc_buf = NULL; - return buf; - } - - if (expected <= max_rpc_size) - return buf; - - info.gsp_rpc_buf += info.gsp_rpc_len; - expected -= info.gsp_rpc_len; - - while (expected) { - u32 size; - - rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); - if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); - return rpc; - } - - info.gsp_rpc_len = rpc->length; - info.continuation = true; - - rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); - if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); - return rpc; - } - - size = info.gsp_rpc_len - sizeof(*rpc); - expected -= size; - info.gsp_rpc_buf += size; - } - - rpc = buf; - rpc->length = gsp_rpc_len; - return buf; -} - -static int -r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc) -{ - struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); - struct r535_gsp_msg *cqe; - u32 gsp_rpc_len = msg->checksum; - u64 *ptr = (void *)msg; - u64 *end; - u64 csum = 0; - int free, time = 1000000; - u32 wptr, size, step, len; - u32 off = 0; - - len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE); - - end = (u64 *)((char *)ptr + len); - msg->pad = 0; - msg->checksum = 0; - msg->sequence = gsp->cmdq.seq++; - msg->elem_count = DIV_ROUND_UP(len, 0x1000); - - while (ptr < end) - csum ^= *ptr++; - - msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); - - wptr = *gsp->cmdq.wptr; - do { - do { - free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; - if (free >= gsp->cmdq.cnt) - free -= gsp->cmdq.cnt; - if (free >= 1) - break; - - usleep_range(1, 2); - } while(--time); - - if (WARN_ON(!time)) { - kvfree(msg); - return -ETIMEDOUT; - } - - cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); - step = min_t(u32, free, (gsp->cmdq.cnt - wptr)); - size = min_t(u32, len, step * GSP_PAGE_SIZE); - - memcpy(cqe, (u8 *)msg + off, size); - - wptr += DIV_ROUND_UP(size, 0x1000); - if (wptr == gsp->cmdq.cnt) - wptr = 0; - - off += size; - len -= size; - } while (len); - - nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); - wmb(); - (*gsp->cmdq.wptr) = wptr; - mb(); - - nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); - - kvfree(msg); - return 0; -} - -static void * -r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len) -{ - struct r535_gsp_msg *msg; - u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len; - - size = ALIGN(size, GSP_MSG_MIN_SIZE); - msg = kvzalloc(size, GFP_KERNEL); - if (!msg) - return ERR_PTR(-ENOMEM); - - msg->checksum = gsp_rpc_len; - return msg->data; -} - -static void -r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) -{ - kvfree(msg); -} - -static void -r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) -{ - if (gsp->subdev.debug >= lvl) { - nvkm_printk__(&gsp->subdev, lvl, info, - "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", - msg->function, msg->length, msg->length - sizeof(*msg), - msg->rpc_result, msg->rpc_result_private); - print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, - msg->data, msg->length - sizeof(*msg), true); - } -} - -static struct nvfw_gsp_rpc * -r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvfw_gsp_rpc *rpc; - int retries = 4000000, i; 
- -retry: - rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - if (rpc->rpc_result) { - r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); - r535_gsp_msg_done(gsp, rpc); - return ERR_PTR(-EINVAL); - } - - r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE); - - if (fn && rpc->function == fn) { - if (gsp_rpc_len) { - if (rpc->length < gsp_rpc_len) { - nvkm_error(subdev, "rpc len %d < %d\n", - rpc->length, gsp_rpc_len); - r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); - r535_gsp_msg_done(gsp, rpc); - return ERR_PTR(-EIO); - } - - return rpc; - } - - r535_gsp_msg_done(gsp, rpc); - return NULL; - } - - for (i = 0; i < gsp->msgq.ntfy_nr; i++) { - struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; - - if (ntfy->fn == rpc->function) { - if (ntfy->func) - ntfy->func(ntfy->priv, ntfy->fn, rpc->data, - rpc->length - sizeof(*rpc)); - break; - } - } - - if (i == gsp->msgq.ntfy_nr) - r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN); - - r535_gsp_msg_done(gsp, rpc); - if (fn) - goto retry; - - if (*gsp->msgq.rptr != *gsp->msgq.wptr) - goto retry; - - return NULL; -} - -static int -r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) -{ - int ret = 0; - - mutex_lock(&gsp->msgq.mutex); - if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { - ret = -ENOSPC; - } else { - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; - gsp->msgq.ntfy_nr++; - } - mutex_unlock(&gsp->msgq.mutex); - return ret; -} - -static int -r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) -{ - void *repv; - - mutex_lock(&gsp->cmdq.mutex); - repv = r535_gsp_msg_recv(gsp, fn, 0); - mutex_unlock(&gsp->cmdq.mutex); - if (IS_ERR(repv)) - return PTR_ERR(repv); - - return 0; -} - -static void * -r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) -{ - struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - struct nvfw_gsp_rpc *msg; - u32 fn = rpc->function; - void *repv = NULL; - int ret; - - if (gsp->subdev.debug >= NV_DBG_TRACE) { - nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, - rpc->length, rpc->length - sizeof(*rpc)); - print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, - rpc->data, rpc->length - sizeof(*rpc), true); - } - - ret = r535_gsp_cmdq_push(gsp, rpc); - if (ret) - return ERR_PTR(ret); - - if (wait) { - msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); - if (!IS_ERR_OR_NULL(msg)) - repv = msg->data; - else - repv = msg; - } - - return repv; -} - -static void -r535_gsp_event_dtor(struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_device *device = event->device; - struct nvkm_gsp_client *client = device->object.client; - struct nvkm_gsp *gsp = client->gsp; - - mutex_lock(&gsp->client_id.mutex); - if (event->func) { - list_del(&event->head); - event->func = NULL; - } - mutex_unlock(&gsp->client_id.mutex); - - nvkm_gsp_rm_free(&event->object); - event->device = NULL; -} - -static int -r535_gsp_device_event_get(struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_device *device = event->device; - NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, - NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->event = event->id; - ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; - return 
nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); -} - -static int -r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, - nvkm_gsp_event_func func, struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_client *client = device->object.client; - struct nvkm_gsp *gsp = client->gsp; - NV0005_ALLOC_PARAMETERS *args; - int ret; - - args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, - NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), - &event->object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->hParentClient = client->object.handle; - args->hSrcResource = 0; - args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; - args->notifyIndex = NV01_EVENT_CLIENT_RM | id; - args->data = NULL; - - ret = nvkm_gsp_rm_alloc_wr(&event->object, args); - if (ret) - return ret; - - event->device = device; - event->id = id; - - ret = r535_gsp_device_event_get(event); - if (ret) { - nvkm_gsp_event_dtor(event); - return ret; - } - - mutex_lock(&gsp->client_id.mutex); - event->func = func; - list_add(&event->head, &client->events); - mutex_unlock(&gsp->client_id.mutex); - return 0; -} - -static void -r535_gsp_device_dtor(struct nvkm_gsp_device *device) -{ - nvkm_gsp_rm_free(&device->subdevice); - nvkm_gsp_rm_free(&device->object); -} - -static int -r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) -{ - NV2080_ALLOC_PARAMETERS *args; - - return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), - &device->subdevice); -} - -static int -r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) -{ - NV0080_ALLOC_PARAMETERS *args; - int ret; - - args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), - &device->object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->hClientShare = client->object.handle; - - ret = nvkm_gsp_rm_alloc_wr(&device->object, args); - if (ret) - return ret; - - ret = r535_gsp_subdevice_ctor(device); - if (ret) - nvkm_gsp_rm_free(&device->object); - - return ret; -} - -static void -r535_gsp_client_dtor(struct nvkm_gsp_client *client) -{ - struct nvkm_gsp *gsp = client->gsp; - - nvkm_gsp_rm_free(&client->object); - - mutex_lock(&gsp->client_id.mutex); - idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); - mutex_unlock(&gsp->client_id.mutex); - - client->gsp = NULL; -} - -static int -r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) -{ - NV0000_ALLOC_PARAMETERS *args; - int ret; - - mutex_lock(&gsp->client_id.mutex); - ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); - mutex_unlock(&gsp->client_id.mutex); - if (ret < 0) - return ret; - - client->gsp = gsp; - client->object.client = client; - INIT_LIST_HEAD(&client->events); - - args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), - &client->object); - if (IS_ERR(args)) { - r535_gsp_client_dtor(client); - return ret; - } - - args->hClient = client->object.handle; - args->processID = ~0; - - ret = nvkm_gsp_rm_alloc_wr(&client->object, args); - if (ret) { - r535_gsp_client_dtor(client); - return ret; - } - - return 0; -} - -static int -r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp *gsp = client->gsp; - rpc_free_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", - client->object.handle, object->handle); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); - if (WARN_ON(IS_ERR_OR_NULL(rpc))) - return -EIO; - - 
rpc->params.hRoot = client->object.handle; - rpc->params.hObjectParent = 0; - rpc->params.hObjectOld = object->handle; - return nvkm_gsp_rpc_wr(gsp, rpc, true); -} - -static void -r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); - - nvkm_gsp_rpc_done(object->client->gsp, rpc); -} - -static void * -r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); - struct nvkm_gsp *gsp = object->client->gsp; - void *ret = NULL; - - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc)); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - if (rpc->status) { - ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); - if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY) - nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); - } - - nvkm_gsp_rpc_done(gsp, rpc); - - return ret; -} - -static void * -r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, - u32 params_size) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp *gsp = client->gsp; - rpc_gsp_rm_alloc_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n", - client->object.handle, object->parent->handle, - object->handle); - - nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass, - params_size); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, - sizeof(*rpc) + params_size); - if (IS_ERR(rpc)) - return rpc; - - rpc->hClient = client->object.handle; - rpc->hParent = object->parent->handle; - rpc->hObject = object->handle; - rpc->hClass = oclass; - rpc->status = 0; - rpc->paramsSize = params_size; - return rpc->params; -} - -static void -r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc); - - if (!params) - return; - nvkm_gsp_rpc_done(object->client->gsp, rpc); -} - -static int -r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc) -{ - rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc); - struct nvkm_gsp *gsp = object->client->gsp; - int ret = 0; - - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); - if (IS_ERR_OR_NULL(rpc)) { - *params = NULL; - return PTR_ERR(rpc); - } - - if (rpc->status) { - ret = r535_rpc_status_to_errno(rpc->status); - if (ret != -EAGAIN && ret != -EBUSY) - nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", - object->client->object.handle, object->handle, rpc->cmd, rpc->status); - } - - if (repc) - *params = rpc->params; - else - nvkm_gsp_rpc_done(gsp, rpc); - - return ret; -} - -static void * -r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp *gsp = client->gsp; - rpc_gsp_rm_control_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n", - client->object.handle, object->handle, cmd, params_size); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, - sizeof(*rpc) + params_size); - if (IS_ERR(rpc)) - return rpc; - - rpc->hClient = client->object.handle; - rpc->hObject = object->handle; - rpc->cmd = cmd; - rpc->status = 0; - rpc->paramsSize = params_size; - return rpc->params; -} - -static void -r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) -{ - struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); - - r535_gsp_msg_done(gsp, rpc); -} - 
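
[Editor's note: the rm_alloc/rm_ctrl helpers removed here remain reachable through the nvkm_gsp_rm_ctrl_*() wrappers used throughout this patch. For reference, a minimal sketch of the caller-side get/fill/write pattern these hooks implement, mirroring r535_gsp_device_event_get() above; the function example_set_notification() and its event_id argument are hypothetical, not part of the patch.]

static int
example_set_notification(struct nvkm_gsp_device *device, u32 event_id)
{
	NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;

	/* rm_ctrl_get: build a GSP_RM_CONTROL RPC and return its params area. */
	ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
				    NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
				    sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* Fill in the command-specific parameters in the RPC payload. */
	ctrl->event = event_id;
	ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

	/* rm_ctrl_wr: push the RPC, check rpc->status, and free the reply. */
	return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
}
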
-static void * -r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) -{ - struct nvfw_gsp_rpc *rpc; - - rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size, - sizeof(u64))); - if (IS_ERR(rpc)) - return ERR_CAST(rpc); - - rpc->header_version = 0x03000000; - rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; - rpc->function = fn; - rpc->rpc_result = 0xffffffff; - rpc->rpc_result_private = 0xffffffff; - rpc->length = sizeof(*rpc) + payload_size; - return rpc->data; -} - -static void * -r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) -{ - struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); - const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg); - const u32 max_payload_size = max_rpc_size - sizeof(*rpc); - u32 payload_size = rpc->length - sizeof(*rpc); - void *repv; - - mutex_lock(&gsp->cmdq.mutex); - if (payload_size > max_payload_size) { - const u32 fn = rpc->function; - u32 remain_payload_size = payload_size; - - /* Adjust length, and send initial RPC. */ - rpc->length = sizeof(*rpc) + max_payload_size; - msg->checksum = rpc->length; - - repv = r535_gsp_rpc_send(gsp, payload, false, 0); - if (IS_ERR(repv)) - goto done; - - payload += max_payload_size; - remain_payload_size -= max_payload_size; - - /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ - while (remain_payload_size) { - u32 size = min(remain_payload_size, - max_payload_size); - void *next; - - next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); - if (IS_ERR(next)) { - repv = next; - goto done; - } - - memcpy(next, payload, size); - - repv = r535_gsp_rpc_send(gsp, next, false, 0); - if (IS_ERR(repv)) - goto done; - - payload += size; - remain_payload_size -= size; - } - - /* Wait for reply. */ - rpc = r535_gsp_msg_recv(gsp, fn, payload_size + - sizeof(*rpc)); - if (!IS_ERR_OR_NULL(rpc)) { - if (wait) { - repv = rpc->data; - } else { - nvkm_gsp_rpc_done(gsp, rpc); - repv = NULL; - } - } else { - repv = wait ? rpc : NULL; - } - } else { - repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len); - } - -done: - mutex_unlock(&gsp->cmdq.mutex); - return repv; -} - -const struct nvkm_gsp_rm -r535_gsp_rm = { - .rpc_get = r535_gsp_rpc_get, - .rpc_push = r535_gsp_rpc_push, - .rpc_done = r535_gsp_rpc_done, - - .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, - .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, - .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, - - .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, - .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, - .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, - - .rm_free = r535_gsp_rpc_rm_free, - - .client_ctor = r535_gsp_client_ctor, - .client_dtor = r535_gsp_client_dtor, - - .device_ctor = r535_gsp_device_ctor, - .device_dtor = r535_gsp_device_dtor, - - .event_ctor = r535_gsp_device_event_ctor, - .event_dtor = r535_gsp_event_dtor, -}; - static void r535_gsp_msgq_work(struct work_struct *work) { @@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth) return IRQ_HANDLED; } +static bool +r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst) +{ + switch (mc_engine_idx) { + case MC_ENGINE_IDX_GSP: + *ptype = NVKM_SUBDEV_GSP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_DISP: + *ptype = NVKM_ENGINE_DISP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_CE0 ... 
MC_ENGINE_IDX_CE9: + *ptype = NVKM_ENGINE_CE; + *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0; + return true; + case MC_ENGINE_IDX_GR0: + *ptype = NVKM_ENGINE_GR; + *pinst = 0; + return true; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0; + return true; + case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: + *ptype = NVKM_ENGINE_NVENC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC; + return true; + case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0; + return true; + case MC_ENGINE_IDX_OFA0: + *ptype = NVKM_ENGINE_OFA; + *pinst = 0; + return true; + default: + return false; + } +} + static int r535_gsp_intr_get_table(struct nvkm_gsp *gsp) { NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; + const struct nvkm_rm_api *rmapi = gsp->rm->api; int ret = 0; ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, @@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); - switch (ctrl->table[i].engineIdx) { - case MC_ENGINE_IDX_GSP: - type = NVKM_SUBDEV_GSP; - inst = 0; - break; - case MC_ENGINE_IDX_DISP: - type = NVKM_ENGINE_DISP; - inst = 0; - break; - case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: - type = NVKM_ENGINE_CE; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; - break; - case MC_ENGINE_IDX_GR0: - type = NVKM_ENGINE_GR; - inst = 0; - break; - case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: - type = NVKM_ENGINE_NVDEC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; - break; - case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: - type = NVKM_ENGINE_NVENC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; - break; - case MC_ENGINE_IDX_NVJPEG0 ... 
MC_ENGINE_IDX_NVJPEG7: - type = NVKM_ENGINE_NVJPG; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; - break; - case MC_ENGINE_IDX_OFA0: - type = NVKM_ENGINE_OFA; - inst = 0; - break; - default: + if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst)) continue; - } if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { ret = -ENOSPC; @@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) return ret; } -static int -r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) +void +r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp, + const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info) { - GspStaticConfigInfo *rpc; int last_usable = -1; - rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); - if (IS_ERR(rpc)) - return PTR_ERR(rpc); - - gsp->internal.client.object.client = &gsp->internal.client; - gsp->internal.client.object.parent = NULL; - gsp->internal.client.object.handle = rpc->hInternalClient; - gsp->internal.client.gsp = gsp; - - gsp->internal.device.object.client = &gsp->internal.client; - gsp->internal.device.object.parent = &gsp->internal.client.object; - gsp->internal.device.object.handle = rpc->hInternalDevice; - - gsp->internal.device.subdevice.client = &gsp->internal.client; - gsp->internal.device.subdevice.parent = &gsp->internal.device.object; - gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; - - gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; - gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; - - for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { - NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = - &rpc->fbRegionInfoParams.fbRegion[i]; + for (int i = 0; i < info->numFBRegions; i++) { + const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i]; nvkm_debug(&gsp->subdev, "fb region %d: " "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, @@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) } if (last_usable >= 0) { - u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; + u32 rsvd_base = info->fbRegion[last_usable].limit + 1; gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; } +} + +static int +r535_gsp_get_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams); for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { if (rpc->gpcInfo.gpcMask & BIT(gpc)) { @@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) return 0; } -static void +void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) { if (mem->data) { @@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) * so we take a device 
reference to ensure its lifetime. The reference is * dropped in the destructor. */ -static int +int nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) { mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); @@ -1277,9 +293,10 @@ static int r535_gsp_postinit(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; + const struct nvkm_rm_api *rmapi = gsp->rm->api; int ret; - ret = r535_gsp_rpc_get_gsp_static_info(gsp); + ret = rmapi->gsp->get_static_info(gsp); if (WARN_ON(ret)) return ret; @@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; } - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } enum registry_type { @@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) build_registry(gsp, rpc); - return nvkm_gsp_rpc_wr(gsp, rpc, false); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); fail: clean_registry(gsp); @@ -1692,7 +709,7 @@ fail: } #if defined(CONFIG_ACPI) && defined(CONFIG_X86) -static void +void r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) { const guid_t NVOP_DSM_GUID = @@ -1702,7 +719,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -1710,23 +726,28 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); } -static void +void r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) { const guid_t JT_DSM_GUID = @@ -1737,24 +758,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -1818,7 +843,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux } } -static void +void r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) { acpi_status status; @@ -1871,7 +896,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) } static int -r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) +r535_gsp_set_system_info(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); @@ -1884,16 +909,16 
@@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) if (IS_ERR(info)) return PTR_ERR(info); - info->gpuPhysAddr = device->func->resource_addr(device, 0); - info->gpuPhysFbAddr = device->func->resource_addr(device, 1); - info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI); + info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB); + info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST); info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); info->maxUserVa = TASK_SIZE; - info->pciConfigMirrorBase = 0x088000; - info->pciConfigMirrorSize = 0x001000; + info->pciConfigMirrorBase = device->pci->func->cfg.addr; + info->pciConfigMirrorSize = device->pci->func->cfg.size; r535_gsp_acpi_info(gsp, &info->acpiMethodData); - return nvkm_gsp_rpc_wr(gsp, info, false); + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); } static int @@ -1911,33 +936,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) } static int -r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) -{ - rpc_rc_triggered_v17_02 *msg = repv; - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_chan *chan; - unsigned long flags; - - if (WARN_ON(repc < sizeof(*msg))) - return -EINVAL; - - nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", - msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, - msg->partitionAttributionId); - - chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); - if (!chan) { - nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); - return 0; - } - - nvkm_chan_error(chan, false); - nvkm_chan_put(&chan, flags); - return 0; -} - -static int r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) { struct nvkm_gsp *gsp = priv; @@ -2130,97 +1128,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) } static int -r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_device *device = subdev->device; - u32 wpr2_hi; - int ret; - - wpr2_hi = nvkm_rd32(device, 0x1fa828); - if (!wpr2_hi) { - nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); - return 0; - } - - ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); - if (WARN_ON(ret)) - return ret; - - wpr2_hi = nvkm_rd32(device, 0x1fa828); - if (WARN_ON(wpr2_hi)) - return -EIO; - - return 0; -} - -static int -r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) -{ - int ret; - - ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); - if (ret) - return ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - - if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) - return -EIO; - - return 0; -} - -static int -r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) -{ - GspFwWprMeta *meta; - int ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); - if (ret) - return ret; - - meta = gsp->wpr_meta.data; - - meta->magic = GSP_FW_WPR_META_MAGIC; - meta->revision = GSP_FW_WPR_META_REVISION; - - meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; - meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; - - meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; - meta->sizeOfBootloader = gsp->boot.fw.size; - meta->bootloaderCodeOffset = gsp->boot.code_offset; - meta->bootloaderDataOffset = gsp->boot.data_offset; - 
meta->bootloaderManifestOffset = gsp->boot.manifest_offset; - - meta->sysmemAddrOfSignature = gsp->sig.addr; - meta->sizeOfSignature = gsp->sig.size; - - meta->gspFwRsvdStart = gsp->fb.heap.addr; - meta->nonWprHeapOffset = gsp->fb.heap.addr; - meta->nonWprHeapSize = gsp->fb.heap.size; - meta->gspFwWprStart = gsp->fb.wpr2.addr; - meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; - meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; - meta->gspFwOffset = gsp->fb.wpr2.elf.addr; - meta->bootBinOffset = gsp->fb.wpr2.boot.addr; - meta->frtsOffset = gsp->fb.wpr2.frts.addr; - meta->frtsSize = gsp->fb.wpr2.frts.size; - meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); - meta->fbSize = gsp->fb.size; - meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; - meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; - meta->bootCount = 0; - meta->partitionRpcAddr = 0; - meta->partitionRpcRequestOffset = 0; - meta->partitionRpcReplyOffset = 0; - meta->verified = 0; - return 0; -} - -static int r535_gsp_shared_init(struct nvkm_gsp *gsp) { struct { @@ -2271,23 +1178,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp) return 0; } -static int -r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +static void +r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume) { - GSP_ARGUMENTS_CACHED *args; - int ret; - - if (!resume) { - ret = r535_gsp_shared_init(gsp); - if (ret) - return ret; + GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data; - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); - if (ret) - return ret; - } - - args = gsp->rmargs.data; args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; args->messageQueueInitArguments.cmdQueueOffset = @@ -2304,7 +1199,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) args->srInitArguments.flags = 0; args->srInitArguments.bInPMTransition = 1; } +} + +static int +r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +{ + int ret; + + if (!resume) { + ret = r535_gsp_shared_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); + if (ret) + return ret; + } + gsp->rm->api->gsp->set_rmargs(gsp, resume); return 0; } @@ -2797,18 +1709,22 @@ lvl1_fail: return ret; } +static u32 +r535_gsp_sr_data_size(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta = gsp->wpr_meta.data; + + return meta->gspFwWprEnd - meta->gspFwWprStart; +} + int r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) { - u32 mbox0 = 0xff, mbox1 = 0xff; + struct nvkm_rm *rm = gsp->rm; int ret; - if (!gsp->running) - return 0; - if (suspend) { - GspFwWprMeta *meta = gsp->wpr_meta.data; - u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; + u32 len = rm->api->gsp->sr_data_size(gsp); GspFwSRMeta *sr; ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); @@ -2829,8 +1745,20 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; sr->sizeOfSuspendResumeData = len; - mbox0 = lower_32_bits(gsp->sr.meta.addr); - mbox1 = upper_32_bits(gsp->sr.meta.addr); + ret = rm->api->fbsr->suspend(gsp); + if (ret) { + nvkm_gsp_mem_dtor(&gsp->sr.meta); + nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); + return ret; + } + + /* + * TODO: Debug the GSP firmware / RPC handling to find out why + * without this Turing (but none of the other architectures) + * ends up resetting all channels after resume. 
+ */ + msleep(50); } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); @@ -2838,18 +1766,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) return ret; nvkm_msec(gsp->subdev.device, 2000, - if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) + if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000) break; ); - nvkm_falcon_reset(&gsp->falcon); - - ret = nvkm_gsp_fwsec_sb(gsp); - WARN_ON(ret); - - ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); - WARN_ON(ret); - gsp->running = false; return 0; } @@ -2857,23 +1777,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) int r535_gsp_init(struct nvkm_gsp *gsp) { - u32 mbox0, mbox1; int ret; - if (!gsp->sr.meta.data) { - mbox0 = lower_32_bits(gsp->wpr_meta.addr); - mbox1 = upper_32_bits(gsp->wpr_meta.addr); - } else { - r535_gsp_rmargs_init(gsp, true); - - mbox0 = lower_32_bits(gsp->sr.meta.addr); - mbox1 = upper_32_bits(gsp->sr.meta.addr); - } + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - /* Execute booter to handle (eventually...) booting GSP-RM. */ - ret = r535_gsp_booter_load(gsp, mbox0, mbox1); - if (WARN_ON(ret)) - goto done; + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); if (ret) @@ -2883,6 +1792,8 @@ r535_gsp_init(struct nvkm_gsp *gsp) done: if (gsp->sr.meta.data) { + gsp->rm->api->fbsr->resume(gsp); + nvkm_gsp_mem_dtor(&gsp->sr.meta); nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); @@ -2944,19 +1855,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u return -ENOENT; } -static void -r535_gsp_dtor_fws(struct nvkm_gsp *gsp) -{ - nvkm_firmware_put(gsp->fws.bl); - gsp->fws.bl = NULL; - nvkm_firmware_put(gsp->fws.booter.unload); - gsp->fws.booter.unload = NULL; - nvkm_firmware_put(gsp->fws.booter.load); - gsp->fws.booter.load = NULL; - nvkm_firmware_put(gsp->fws.rm); - gsp->fws.rm = NULL; -} - #ifdef CONFIG_DEBUG_FS struct r535_gsp_log { @@ -3190,10 +2088,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) nvkm_falcon_fw_dtor(&gsp->booter.unload); nvkm_falcon_fw_dtor(&gsp->booter.load); + nvkm_gsp_mem_dtor(&gsp->fmc.args); + kfree(gsp->fmc.sig); + kfree(gsp->fmc.pkey); + kfree(gsp->fmc.hash); + nvkm_gsp_mem_dtor(&gsp->fmc.fw); + mutex_destroy(&gsp->msgq.mutex); mutex_destroy(&gsp->cmdq.mutex); - r535_gsp_dtor_fws(gsp); + nvkm_gsp_dtor_fws(gsp); nvkm_gsp_mem_dtor(&gsp->rmargs); nvkm_gsp_mem_dtor(&gsp->wpr_meta); @@ -3206,10 +2110,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) nvkm_gsp_mem_dtor(&gsp->logrm); } +static void +r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp) +{ + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); +} + int r535_gsp_oneinit(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; + const struct nvkm_rm_api *rmapi = gsp->rm->api; const u8 *data; u64 size; int ret; @@ -3217,16 +2128,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) mutex_init(&gsp->cmdq.mutex); mutex_init(&gsp->msgq.mutex); - ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, - &device->sec2->falcon, &gsp->booter.load); - if (ret) - return ret; - - ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, - &device->sec2->falcon, &gsp->booter.unload); - if (ret) - return ret; - /* Load GSP firmware from ELF image into DMA-accessible memory. 
*/ ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); if (ret) @@ -3255,65 +2156,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, r535_gsp_msg_run_cpu_sequencer, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, - r535_gsp_msg_rc_triggered, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, r535_gsp_msg_mmu_fault_queued, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); + if (rmapi->gsp->drop_send_user_shared_data) + rmapi->gsp->drop_send_user_shared_data(gsp); + if (rmapi->gsp->drop_post_nocat_record) + rmapi->gsp->drop_post_nocat_record(gsp); + ret = r535_gsp_rm_boot_ctor(gsp); if (ret) return ret; /* Release FW images - we've copied them to DMA buffers now. */ - r535_gsp_dtor_fws(gsp); - - /* Calculate FB layout. */ - gsp->fb.wpr2.frts.size = 0x100000; - gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; - - gsp->fb.wpr2.boot.size = gsp->boot.fw.size; - gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); - - gsp->fb.wpr2.elf.size = gsp->fw.len; - gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); - - { - u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); - - gsp->fb.wpr2.heap.size = - gsp->func->wpr_heap.os_carveout_size + - gsp->func->wpr_heap.base_size + - ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + - ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); - - gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); - } - - gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); - gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); - - gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); - gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; - - gsp->fb.heap.size = 0x100000; - gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; - - ret = nvkm_gsp_fwsec_frts(gsp); - if (WARN_ON(ret)) - return ret; + nvkm_gsp_dtor_fws(gsp); ret = r535_gsp_libos_init(gsp); if (WARN_ON(ret)) return ret; - ret = r535_gsp_wpr_meta_init(gsp); - if (WARN_ON(ret)) - return ret; - - ret = r535_gsp_rpc_set_system_info(gsp); + ret = rmapi->gsp->set_system_info(gsp); if (WARN_ON(ret)) return ret; @@ -3321,76 +2186,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) if (WARN_ON(ret)) return ret; - /* Reset GSP into RISC-V mode. 
*/ - ret = gsp->func->reset(gsp); - if (WARN_ON(ret)) - return ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); - nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); - mutex_init(&gsp->client_id.mutex); idr_init(&gsp->client_id.idr); return 0; } -static int -r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, - const struct firmware **pfw) -{ - char fwname[64]; - - snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); - return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); -} - -int -r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - int ret; - bool enable_gsp = fwif->enable; - -#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) - enable_gsp = true; -#endif - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) - return -EINVAL; - - if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || - (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) || - (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) || - (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) { - r535_gsp_dtor_fws(gsp); - return ret; - } - - return 0; -} - -#define NVKM_GSP_FIRMWARE(chip) \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin") - -NVKM_GSP_FIRMWARE(tu102); -NVKM_GSP_FIRMWARE(tu104); -NVKM_GSP_FIRMWARE(tu106); - -NVKM_GSP_FIRMWARE(tu116); -NVKM_GSP_FIRMWARE(tu117); - -NVKM_GSP_FIRMWARE(ga100); - -NVKM_GSP_FIRMWARE(ga102); -NVKM_GSP_FIRMWARE(ga103); -NVKM_GSP_FIRMWARE(ga104); -NVKM_GSP_FIRMWARE(ga106); -NVKM_GSP_FIRMWARE(ga107); - -NVKM_GSP_FIRMWARE(ad102); -NVKM_GSP_FIRMWARE(ad103); -NVKM_GSP_FIRMWARE(ad104); -NVKM_GSP_FIRMWARE(ad106); -NVKM_GSP_FIRMWARE(ad107); +const struct nvkm_rm_api_gsp +r535_gsp = { + .set_rmargs = r535_gsp_set_rmargs, + .set_system_info = r535_gsp_set_system_info, + .get_static_info = r535_gsp_get_static_info, + .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx, + .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data, + .sr_data_size = r535_gsp_sr_data_size, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c index 1b4619ff9e8e..a8c42ec0367b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c @@ -19,26 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "priv.h" +#include <rm/engine.h> -#include <subdev/gsp.h> +#include "nvrm/nvdec.h" -#include <nvif/class.h> +static int +r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvdec) +{ + NV_BSP_ALLOCATION_PARAMETERS *args; -static const struct nvkm_engine_func -ad102_nvenc = { - .sclass = { - { -1, -1, NVC9B7_VIDEO_ENCODER }, - {} - } -}; + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); -int -ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvenc **pnvenc) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc); + args->size = sizeof(*args); + args->engineInstance = inst; - return -ENODEV; + return nvkm_gsp_rm_alloc_wr(nvdec, args); } + +const struct nvkm_rm_api_engine +r535_nvdec = { + .alloc = r535_nvdec_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c index d72b3aae9a2b..acb3ce8bb9de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c @@ -19,26 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "priv.h" +#include <rm/engine.h> -#include <subdev/gsp.h> +#include "nvrm/nvenc.h" -#include <nvif/class.h> +static int +r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvenc) +{ + NV_MSENC_ALLOCATION_PARAMETERS *args; -static const struct nvkm_engine_func -ad102_nvdec = { - .sclass = { - { -1, -1, NVC9B0_VIDEO_DECODER }, - {} - } -}; + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); -int -ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvdec **pnvdec) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec); + args->size = sizeof(*args); + args->engineInstance = inst; - return -ENODEV; + return nvkm_gsp_rm_alloc_wr(nvenc, args); } + +const struct nvkm_rm_api_engine +r535_nvenc = { + .alloc = r535_nvenc_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c new file mode 100644 index 000000000000..fbc4080ad8d8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <rm/engine.h> + +#include "nvrm/nvjpg.h" + +static int +r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvjpg) +{ + NV_NVJPG_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = inst; + + return nvkm_gsp_rm_alloc_wr(nvjpg, args); +} + +const struct nvkm_rm_api_engine +r535_nvjpg = { + .alloc = r535_nvjpg_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h new file mode 100644 index 000000000000..cbc7e611fbda --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_ALLOC_H__ +#define __NVRM_ALLOC_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct rpc_gsp_rm_alloc_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvU32 hClass; + NvU32 status; + NvU32 paramsSize; + NvU32 flags; + NvU8 reserved[4]; + NvU8 params[]; +} rpc_gsp_rm_alloc_v03_00; + +typedef struct NVOS00_PARAMETERS_v03_00 +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS_v03_00; + +typedef struct rpc_free_v03_00 +{ + NVOS00_PARAMETERS_v03_00 params; +} rpc_free_v03_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h new file mode 100644 index 000000000000..60b0b08491ee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_BAR_H__ +#define __NVRM_BAR_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef enum +{ + NV_RPC_UPDATE_PDE_BAR_1, + NV_RPC_UPDATE_PDE_BAR_2, + NV_RPC_UPDATE_PDE_BAR_INVALID, +} NV_RPC_UPDATE_PDE_BAR_TYPE; + +typedef struct UpdateBarPde_v15_00 +{ + NV_RPC_UPDATE_PDE_BAR_TYPE barType; + NvU64 entryValue NV_ALIGN_BYTES(8); + NvU64 entryLevelShift NV_ALIGN_BYTES(8); +} UpdateBarPde_v15_00; + +typedef struct rpc_update_bar_pde_v15_00 +{ + UpdateBarPde_v15_00 info; +} rpc_update_bar_pde_v15_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h new file mode 100644 index 000000000000..90b0325203d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_CE_H__ +#define __NVRM_CE_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct NVC0B5_ALLOCATION_PARAMETERS { + NvU32 version; + NvU32 engineType; +} NVC0B5_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h new file mode 100644 index 000000000000..df0e63c0cb6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_CLIENT_H__ +#define __NVRM_CLIENT_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV_PROC_NAME_MAX_LENGTH 100U + +typedef struct NV0000_ALLOC_PARAMETERS { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; +} NV0000_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h new file mode 100644 index 000000000000..77f10acd82c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_CTRL_H__ +#define __NVRM_CTRL_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct rpc_gsp_rm_control_v03_00 +{ + NvHandle hClient; + NvHandle hObject; + NvU32 cmd; + NvU32 status; + NvU32 paramsSize; + NvU32 flags; + NvU8 params[]; +} rpc_gsp_rm_control_v03_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h new file mode 100644 index 000000000000..3933b9ad61ce --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_DEVICE_H__ +#define __NVRM_DEVICE_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +typedef struct NV0080_ALLOC_PARAMETERS { + NvU32 deviceId; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvV32 flags; + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8); + NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8); + NvV32 vaMode; +} NV0080_ALLOC_PARAMETERS; + +#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +typedef struct NV2080_ALLOC_PARAMETERS { + NvU32 subDeviceId; +} NV2080_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h new file mode 100644 index 000000000000..7b7539639540 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h @@ -0,0 +1,741 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_DISP_H__ +#define __NVRM_DISP_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); + NvU32 instMemAddrSpace; + NvU32 instMemCpuCacheAttr; +} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; + +#define NV_MEMORY_WRITECOMBINED 2 + +#define NV04_DISPLAY_COMMON (0x00000073) + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { + NvU32 feHwSysCap; + NvU32 windowPresentMask; + NvBool bFbRemapperEnabled; + NvU32 numHeads; + NvBool bPrimaryVga; + NvU32 i2cPort; + NvU32 internalDispActiveMask; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; + +#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */ + +#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS { + NvU32 status; + NvU16 backLightDataSize; + NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE]; +} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS { + NvU32 subDeviceInstance; +} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+    NvU32 instMemAddrSpace;
+    NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+    NvU32 feHwSysCap;
+    NvU32 windowPresentMask;
+    NvBool bFbRemapperEnabled;
+    NvU32 numHeads;
+    NvBool bPrimaryVga;
+    NvU32 i2cPort;
+    NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+    NvU32 status;
+    NvU16 backLightDataSize;
+    NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 DDCPartners;
+    NvU32 count;
+    struct {
+        NvU32 index;
+        NvU32 type;
+        NvU32 location;
+    } data[NV0073_CTRL_MAX_CONNECTORS];
+    NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 index;
+    NvU32 type;
+    NvU32 protocol;
+    NvU32 ditherType;
+    NvU32 ditherAlgo;
+    NvU32 location;
+    NvU32 rootPortId;
+    NvU32 dcbIndex;
+    NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+    NvBool bIsLitByVbios;
+    NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+    NvBool bDscSupported;
+    NvU32 encoderColorFormatMask;
+    NvU32 lineBufferSizeKB;
+    NvU32 rateBufferSizeKB;
+    NvU32 bitsPerPixelPrecision;
+    NvU32 maxNumHztSlices;
+    NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 sorIndex;
+    NvU32 maxLinkRate;
+    NvU32 dpVersionsSupported;
+    NvU32 UHBRSupported;
+    NvBool bIsMultistreamSupported;
+    NvBool bIsSCEnabled;
+    NvBool bHasIncreasedWatermarkLimits;
+    NvBool bIsPC2Disabled;
+    NvBool isSingleHeadMSTSupported;
+    NvBool bFECSupported;
+    NvBool bIsTrainPhyRepeater;
+    NvBool bOverrideLinkBw;
+    NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
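Aside (illustrative, not part of this patch): definitions such as MAX_LINK_RATE 2:0 use RM's "high:low" bit-range notation. The ternary trick below, where (1 ? 2:0) evaluates to 2 and (0 ? 2:0) to 0, is how DRF-style macros take a range apart; nouveau's <nvhw/drf.h> helpers work the same way. The NVRM_* names here are hypothetical:

#define NVRM_HI(f) (1 ? f) /* "h:l" parses as (1 ? h : l), i.e. h */
#define NVRM_LO(f) (0 ? f) /* "h:l" parses as (0 ? h : l), i.e. l */
#define NVRM_GET(v, f) \
	(((v) >> NVRM_LO(f)) & ((2ULL << (NVRM_HI(f) - NVRM_LO(f))) - 1))

/* e.g. the link-rate code packed into bits 2:0 of the maxLinkRate field: */
static NvU32 dp_caps_max_link_rate(const NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *caps)
{
	return NVRM_GET(caps->maxLinkRate, NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE);
}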
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+    NvU32 plugDisplayMask;
+    NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+    NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 displayMask;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 flags;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+    NvU32 displayMask;
+    NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU8 sorExcludeMask;
+    NvU32 slaveDisplayId;
+    NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+    NvBool bIs2Head1Or;
+    NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NvU8 reservedSorMask;
+    NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 brightness;
+    NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numELDSize;
+    NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+    NvU32 maxFreqSupported;
+    NvU32 ctrl;
+    NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 bufferSize;
+    NvU32 flags;
+    NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+    NvU8 subDeviceInstance;
+    NvU32 displayId;
+    NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 transmitControl;
+    NvU32 packetSize;
+    NvU32 targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+    NvU8 subDeviceInstance;
+    NvU32 displayId;
+    NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvBool bAddrOnly;
+    NvU32 cmd;
+    NvU32 addr;
+    NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+    NvU32 size;
+    NvU32 replyType;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
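Usage sketch (illustrative, not part of this patch): a native-AUX read of up to 16 DPCD bytes through NV0073_CTRL_CMD_DP_AUXCH_CTRL. rm_ctrl() is a hypothetical stand-in for the driver's control RPC, and this sketch assumes RM returns the transferred byte count back in .size:

#include <string.h>

int rm_ctrl(NvHandle hclient, NvHandle hobject, NvU32 cmd, void *params); /* hypothetical */

static int dp_dpcd_read(NvHandle hclient, NvHandle hdisp, NvU32 display_id,
                        NvU32 addr, NvU8 *buf, NvU32 size)
{
	NV0073_CTRL_DP_AUXCH_CTRL_PARAMS ctrl = {
		.displayId = display_id,
		.addr = addr,
		.size = size, /* at most NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE */
		.cmd = (NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX << 3) | /* TYPE is bit 3 */
		       NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ,   /* REQ_TYPE is 1:0 */
	};
	int ret = rm_ctrl(hclient, hdisp, NV0073_CTRL_CMD_DP_AUXCH_CTRL, &ctrl);

	if (ret == 0)
		memcpy(buf, ctrl.data, ctrl.size < size ? ctrl.size : size);
	return ret;
}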
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+    // In
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+    // Out
+    NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+    NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+    NvU32 retryTimeMs;
+    NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
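Usage sketch (illustrative, not part of this patch): NV0073_CTRL_CMD_DP_CTRL packs its request into the cmd and data bit ranges above; here, asking RM to train a 4-lane 2.70 Gbps link (rm_ctrl() hypothetical, as before):

int rm_ctrl(NvHandle hclient, NvHandle hobject, NvU32 cmd, void *params); /* hypothetical */

static int dp_set_link_config(NvHandle hclient, NvHandle hdisp, NvU32 display_id)
{
	NV0073_CTRL_DP_CTRL_PARAMS ctrl = { .displayId = display_id };

	/* cmd: SET_LANE_COUNT is bit 0, SET_LINK_BW is bit 1. */
	ctrl.cmd = (NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE << 0) |
		   (NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE << 1);
	/* data: lane count in bits 4:0, link-bandwidth code in bits 15:8. */
	ctrl.data = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 |
		    (NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS << 8);

	return rm_ctrl(hclient, hdisp, NV0073_CTRL_CMD_DP_CTRL, &ctrl);
}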
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numLanes;
+    NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 preferredDisplayId;
+
+    NvBool force;
+    NvBool useBFM;
+
+    NvU32 displayIdAssigned;
+    NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 dpLink;
+
+    NvBool bEnableOverride;
+    NvBool bMST;
+    NvU32 singleHeadMultistreamMode;
+    NvU32 hBlankSym;
+    NvU32 vBlankSym;
+    NvU32 colorFormat;
+    NvBool bEnableTwoHeadOneOr;
+
+    struct {
+        NvU32 slotStart;
+        NvU32 slotEnd;
+        NvU32 PBN;
+        NvU32 Timeslice;
+        NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+        NvU32 singleHeadMSTPipeline;
+        NvBool bEnableAudioOverRightPanel;
+    } MST;
+
+    struct {
+        NvBool bEnhancedFraming;
+        NvU32 tuSize;
+        NvU32 waterMark;
+        NvU32 actualPclkHz; // deprecated -Use MvidWarParams
+        NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
+        NvBool bEnableAudioOverRightPanel;
+        struct {
+            NvU32 activeCnt;
+            NvU32 activeFrac;
+            NvU32 activePolarity;
+            NvBool mvidWarEnabled;
+            struct {
+                NvU32 actualPclkHz;
+                NvU32 linkClkFreqHz;
+            } MvidWarParams;
+        } Legacy;
+    } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+    NvU32 addressSpace;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvU32 cacheSnoop;
+    NvU32 hclass;
+    NvU32 channelInstance;
+    NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+    NvV32 channelInstance;            // One of the n channel instances of a given channel type.
+                                      // All PIO channels have two instances (one per head).
+    NvHandle hObjectNotify;           // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+    NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32 channelInstance;            // One of the n channel instances of a given channel type.
+                                      // Note that core channel has only one instance
+                                      // while all others have two (one per head).
+    NvHandle hObjectBuffer;           // ctx dma handle for DMA push buffer
+    NvHandle hObjectNotify;           // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+    NvU32 offset;                     // Initial offset for put/get, usually zero.
+    NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+    NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
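Usage sketch (illustrative, not part of this patch): filling the DMA-channel allocation arguments for a display channel; the two ctxdma handles would come from earlier allocations, and all names here are illustrative:

#include <string.h>

static void disp_dma_chan_args(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args,
                               NvV32 instance, NvHandle pushbuf_ctxdma,
                               NvHandle error_ctxdma)
{
	memset(args, 0, sizeof(*args));
	args->channelInstance = instance;     /* head index; core channel has one */
	args->hObjectBuffer = pushbuf_ctxdma; /* ctx dma of the DMA push buffer */
	args->hObjectNotify = error_ctxdma;   /* where RM writes error notifications */
	args->offset = 0;                     /* initial GET/PUT offset */
}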
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
+
+typedef enum
+{
+    RM_ENGINE_TYPE_NULL = (0x00000000),
+    RM_ENGINE_TYPE_GR0 = (0x00000001),
+    RM_ENGINE_TYPE_GR1 = (0x00000002),
+    RM_ENGINE_TYPE_GR2 = (0x00000003),
+    RM_ENGINE_TYPE_GR3 = (0x00000004),
+    RM_ENGINE_TYPE_GR4 = (0x00000005),
+    RM_ENGINE_TYPE_GR5 = (0x00000006),
+    RM_ENGINE_TYPE_GR6 = (0x00000007),
+    RM_ENGINE_TYPE_GR7 = (0x00000008),
+    RM_ENGINE_TYPE_COPY0 = (0x00000009),
+    RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+    RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+    RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+    RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+    RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+    RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+    RM_ENGINE_TYPE_COPY7 = (0x00000010),
+    RM_ENGINE_TYPE_COPY8 = (0x00000011),
+    RM_ENGINE_TYPE_COPY9 = (0x00000012),
+    RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+    RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+    RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+    RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+    RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+    RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+    RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+    RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+    RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+    RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+    RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+    RM_ENGINE_TYPE_VP = (0x00000028),
+    RM_ENGINE_TYPE_ME = (0x00000029),
+    RM_ENGINE_TYPE_PPP = (0x0000002a),
+    RM_ENGINE_TYPE_MPEG = (0x0000002b),
+    RM_ENGINE_TYPE_SW = (0x0000002c),
+    RM_ENGINE_TYPE_TSEC = (0x0000002d),
+    RM_ENGINE_TYPE_VIC = (0x0000002e),
+    RM_ENGINE_TYPE_MP = (0x0000002f),
+    RM_ENGINE_TYPE_SEC2 = (0x00000030),
+    RM_ENGINE_TYPE_HOST = (0x00000031),
+    RM_ENGINE_TYPE_DPU = (0x00000032),
+    RM_ENGINE_TYPE_PMU = (0x00000033),
+    RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+    RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+    RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+    RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+    RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+    RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+    RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+    RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+    RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+    RM_ENGINE_TYPE_OFA = (0x0000003d),
+    RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
+#endif
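Aside (illustrative, not part of this patch): the *_SIZE constants and the IS_*/_IDX macros exist so code can walk these contiguous ID ranges generically, for example:

/* Returns the copy-engine instance for 'type', or -1 if it is not a CE. */
static int nv2080_ce_index(NvU32 type)
{
	if (!NV2080_ENGINE_TYPE_IS_COPY(type))
		return -1;
	return NV2080_ENGINE_TYPE_COPY_IDX(type); /* COPY0..COPY9 map to 0..9 */
}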
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+    NvHandle hParentClient;
+    NvHandle hSrcResource;
+
+    NvV32 hClass;
+    NvV32 notifyIndex;
+    NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32 event;
+    NvU32 action;
+    NvBool bNotifyState;
+    NvU32 info32;
+    NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+    NvHandle hClient;
+    NvHandle hEvent;
+    NvU32 notifyIndex;
+    NvU32 data;
+    NvU16 info16;
+    NvU32 status;
+    NvU32 eventDataSize;
+    NvBool bNotifyList;
+    NvU8 eventData[];
+} rpc_post_event_v17_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+struct pte_desc
+{
+    NvU32 idr:2;
+    NvU32 reserved1:14;
+    NvU32 length:16;
+    union {
+        NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+        NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+    } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvHandle hMemory;
+    NvU32 hClass;
+    NvU32 flags;
+    NvU32 pteAdjust;
+    NvU32 format;
+    NvU64 length NV_ALIGN_BYTES(8);
+    NvU32 pageCount;
+    struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
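Aside (illustrative, not part of this patch): rpc_alloc_memory_v13_01 embeds its page list through pte_desc's flexible pte_pde[] array, so the RPC size depends on pageCount. A sizing sketch, under the assumption that one 64-bit entry is sent per page when idr == 0:

#include <stddef.h>

/* Bytes needed for an alloc-memory RPC describing 'page_count' pages. */
static size_t rpc_alloc_memory_size(NvU32 page_count)
{
	rpc_alloc_memory_v13_01 *rpc; /* only used for sizeof, never dereferenced */

	return sizeof(*rpc) + (size_t)page_count * sizeof(rpc->pteDesc.pte_pde[0]);
}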
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+    NvU32 fbsrType;
+    NvU32 numRegions;
+    NvHandle hClient;
+    NvHandle hSysMem;
+    NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+    NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+    NvU32 fbsrType;
+    NvHandle hClient;
+    NvHandle hVidMem;
+    NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+#endif
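Usage sketch (illustrative, not part of this patch): suspend-time framebuffer save/restore is described to RM with one FBSR_INIT call followed by a SEND_REGION_INFO per vidmem region (rm_ctrl() hypothetical, as before):

int rm_ctrl(NvHandle hclient, NvHandle hobject, NvU32 cmd, void *params); /* hypothetical */

static int fbsr_describe_one_region(NvHandle hclient, NvHandle hsubdev,
                                    NvHandle hsysmem, NvHandle hvidmem,
                                    NvU64 vid_offset, NvU64 size)
{
	NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS init = {
		.fbsrType = FBSR_TYPE_DMA,
		.numRegions = 1,
		.hClient = hclient,
		.hSysMem = hsysmem,
	};
	NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS region = {
		.fbsrType = FBSR_TYPE_DMA,
		.hClient = hclient,
		.hVidMem = hvidmem,
		.vidOffset = vid_offset,
		.sysOffset = 0,
		.size = size,
	};
	int ret = rm_ctrl(hclient, hsubdev, NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, &init);

	if (ret)
		return ret;
	return rm_ctrl(hclient, hsubdev,
	               NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO, &region);
}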
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+    NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+    NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 numPbdmas;
+    char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+    NvU32 baseIndex;
+    NvU32 numEntries;
+    NvBool bMore;
+    // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+    NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
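Usage sketch (illustrative, not part of this patch): the device-info table is paged in chunks of up to 32 entries; bMore asks the caller to advance baseIndex and fetch again. Each entry's engineData[] is indexed by the ENGINE_INFO_TYPE enum that follows (rm_ctrl() hypothetical, as before):

int rm_ctrl(NvHandle hclient, NvHandle hobject, NvU32 cmd, void *params); /* hypothetical */

static int fifo_walk_device_info(NvHandle hclient, NvHandle hsubdev)
{
	NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS params = { .baseIndex = 0 };

	do {
		int ret = rm_ctrl(hclient, hsubdev,
				  NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, &params);
		if (ret)
			return ret;

		for (NvU32 i = 0; i < params.numEntries; i++) {
			NvU32 type = params.entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
			(void)type; /* e.g. match against RM_ENGINE_TYPE_* */
		}

		params.baseIndex += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
	} while (params.bMore);

	return 0;
}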
+ * ************************************************************************/ +} ENGINE_INFO_TYPE; + +#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS { + NvU32 size; +} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40 + +typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO; + +#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +#define NV_MAX_SUBDEVICES 8 + +typedef struct NV_MEMORY_DESC_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U + +#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U + +typedef struct NV_CHANNEL_ALLOC_PARAMS { + + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + + NvU32 flags; + + + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + + // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; + + // offset to beginning of UserD within hUserdMemory[x] + NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); + + // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 engineType; + // Channel identifier that is unique for the duration of a RM session + NvU32 cid; + // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvU32 subDeviceId; + NvHandle hObjectEccError; // ECC error context DMA + + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); + + NvHandle hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved + + // IV used for CPU-side encryption / GPU-side decryption. + NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // IV used for CPU-side decryption / GPU-side encryption. + NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // Nonce used CPU-side signing / GPU-side signature verification. 
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved +} NV_CHANNEL_ALLOC_PARAMS; + +typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 +#define NVOS04_FLAGS_CC_SECURE 2:2 +#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 +#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 +#define 
NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 + +typedef enum { + /*! + * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by + * kernel CPU-RM clients. + */ + ERROR_NOTIFIER_TYPE_UNKNOWN = 0, + /*! @brief Error notifier is explicitly not set. + * + * The corresponding hErrorContext or hEccErrorContext must be + * NV01_NULL_OBJECT. + */ + ERROR_NOTIFIER_TYPE_NONE, + /*! @brief Error notifier is a ContextDma */ + ERROR_NOTIFIER_TYPE_CTXDMA, + /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ + ERROR_NOTIFIER_TYPE_MEMORY +} ErrorNotifierType; + +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY + +#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */ +typedef struct NVA06F_CTRL_BIND_PARAMS { + NvU32 engineType; +} NVA06F_CTRL_BIND_PARAMS; + +#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */ +typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS { + NvBool bEnable; + NvBool bSkipSubmit; +} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U + +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 physAttr; + NvU16 bufferId; + NvU8 bInitialize; + NvU8 bNonmapped; +} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY; + +#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS { + NvU32 engineType; + NvHandle hClient; + NvU32 ChID; + NvHandle hChanClient; + NvHandle hObject; + NvHandle hVirtMemory; + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 entryCount; + // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8); +} 
NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS; + +typedef struct rpc_rc_triggered_v17_02 +{ + NvU32 nv2080EngineType; + NvU32 chid; + NvU32 exceptType; + NvU32 scope; + NvU16 partitionAttributionId; +} rpc_rc_triggered_v17_02; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h new file mode 100644 index 000000000000..82c5ec727bb4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_GR_H__ +#define __NVRM_GR_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19 + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define 
NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019) + +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U + +#include "fifo.h" +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h new file mode 100644 index 000000000000..b6683a5bf870 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h @@ -0,0 +1,825 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_GSP_H__ +#define __NVRM_GSP_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 + +#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL) + +typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + NvU32 index; + NvU32 flags; + NvU32 length; + NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; +} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { + NvU32 gpcMask; +} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS { + NvU32 gpcId; + NvU32 zcullMask; +} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS; + +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + NvU32 BoardID; + char chipSKU[4]; + char chipSKUMod[2]; + char project[5]; + char projectSKU[5]; + char CDP[6]; + char projectSKUMod[2]; + NvU32 businessCycle; +} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; + +typedef enum +{ + COMPUTE_BRANDING_TYPE_NONE, + COMPUTE_BRANDING_TYPE_TESLA, +} COMPUTE_BRANDING_TYPE; + +#define MAX_GPC_COUNT 32 + +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + NvU32 totalVFs; + NvU32 firstVfOffset; + NvU32 vfFeatureMask; + NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); + NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; + NvBool bSriovEnabled; + NvBool bSriovHeavyEnabled; + NvBool bEmulateVFBar0TlbInvalidationRegister; + NvBool bClientRmAllocatedCtxBuffer; +} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; + +#include "engine.h" + +#define NVGPU_ENGINE_CAPS_MASK_BITS 32 + +#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) + +typedef struct GspSMInfo_t +{ + NvU32 version; + NvU32 regBankCount; + NvU32 regBankRegCount; + NvU32 maxWarpsPerSM; + NvU32 maxThreadsPerWarp; + NvU32 geomGsObufEntries; + NvU32 geomXbufEntries; + NvU32 maxSPPerSM; + NvU32 rtCoreCount; +} GspSMInfo; + +typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS { + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2, + 
NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8, +} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS; + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +typedef struct GspStaticConfigInfo_t +{ + NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo; + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT]; + NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT]; + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; + COMPUTE_BRANDING_TYPE computeBranding; + + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; + NvU32 sriovMaxGfid; + + NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]; + + GspSMInfo SM_info; + + NvBool poisonFuseEnabled; + + NvU64 fb_length; + NvU32 fbio_mask; + NvU32 fb_bus_width; + NvU32 fb_ram_type; + NvU32 fbp_mask; + NvU32 l2_cache_size; + + NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + + NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvBool bGpuInternalSku; + NvBool bIsQuadroGeneric; + NvBool bIsQuadroAd; + NvBool bIsNvidiaNvs; + NvBool bIsVgx; + NvBool bGeforceSmb; + NvBool bIsTitan; + NvBool bIsTesla; + NvBool bIsMobile; + NvBool bIsGc6Rtd3Allowed; + NvBool bIsGcOffRtd3Allowed; + NvBool bIsGcoffLegacyAllowed; + + NvU64 bar1PdeBase; + NvU64 bar2PdeBase; + + NvBool bVbiosValid; + NvU32 vbiosSubVendor; + NvU32 vbiosSubDevice; + + NvBool bPageRetirementSupported; + + NvBool bSplitVasBetweenServerClientRm; + + NvBool bClRootportNeedsNosnoopWAR; + + VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; + VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; + NvU64 displaylessMaxPixels; + + // Client handle for internal RMAPI control. + NvHandle hInternalClient; + + // Device handle for internal RMAPI control. + NvHandle hInternalDevice; + + // Subdevice handle for internal RMAPI control. 
+ NvHandle hInternalSubdevice; + + NvBool bSelfHostedMode; + NvBool bAtsSupported; + + NvBool bIsGpuUefi; +} GspStaticConfigInfo; + +typedef struct rpc_unloading_guest_driver_v1F_07 +{ + NvBool bInPMTransition; + NvBool bGc6Entering; + NvU32 newLevel; +} rpc_unloading_guest_driver_v1F_07; + +typedef struct PACKED_REGISTRY_ENTRY +{ + NvU32 nameOffset; + NvU8 type; + NvU32 data; + NvU32 length; +} PACKED_REGISTRY_ENTRY; + +typedef struct PACKED_REGISTRY_TABLE +{ + NvU32 size; + NvU32 numEntries; + PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries); +} PACKED_REGISTRY_TABLE; + +typedef struct +{ + NvU16 deviceID; // deviceID + NvU16 vendorID; // vendorID + NvU16 subdeviceID; // subsystem deviceID + NvU16 subvendorID; // subsystem vendorID + NvU8 revisionID; // revision ID +} BUSINFO; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct DOD_METHOD_DATA +{ + NV_STATUS status; + NvU32 acpiIdListLen; + NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} DOD_METHOD_DATA; + +typedef struct JT_METHOD_DATA +{ + NV_STATUS status; + NvU32 jtCaps; + NvU16 jtRevId; + NvBool bSBIOSCaps; +} JT_METHOD_DATA; + +typedef struct MUX_METHOD_DATA_ELEMENT +{ + NvU32 acpiId; + NvU32 mode; + NV_STATUS status; +} MUX_METHOD_DATA_ELEMENT; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct MUX_METHOD_DATA +{ + NvU32 tableLen; + MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} MUX_METHOD_DATA; + +typedef struct CAPS_METHOD_DATA +{ + NV_STATUS status; + NvU32 optimusCaps; +} CAPS_METHOD_DATA; + +typedef struct ACPI_METHOD_DATA +{ + NvBool bValid; + DOD_METHOD_DATA dodMethodData; + JT_METHOD_DATA jtMethodData; + MUX_METHOD_DATA muxMethodData; + CAPS_METHOD_DATA capsMethodData; +} ACPI_METHOD_DATA; + +typedef struct GSP_VF_INFO +{ + NvU32 totalVFs; + NvU32 firstVFOffset; + NvU64 FirstVFBar0Address; + NvU64 FirstVFBar1Address; + NvU64 FirstVFBar2Address; + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; +} GSP_VF_INFO; + +typedef struct GspSystemInfo +{ + NvU64 gpuPhysAddr; + NvU64 gpuPhysFbAddr; + NvU64 gpuPhysInstAddr; + NvU64 nvDomainBusDeviceFunc; + NvU64 simAccessBufPhysAddr; + NvU64 pcieAtomicsOpMask; + NvU64 consoleMemSize; + NvU64 maxUserVa; + NvU32 pciConfigMirrorBase; + NvU32 pciConfigMirrorSize; + NvU8 oorArch; + NvU64 clPdbProperties; + NvU32 Chipset; + NvBool bGpuBehindBridge; + NvBool bMnocAvailable; + NvBool bUpstreamL0sUnsupported; + NvBool bUpstreamL1Unsupported; + NvBool bUpstreamL1PorSupported; + NvBool bUpstreamL1PorMobileOnly; + NvU8 upstreamAddressValid; + BUSINFO FHBBusInfo; + BUSINFO chipsetIDInfo; + ACPI_METHOD_DATA acpiMethodData; + NvU32 hypervisorType; + NvBool bIsPassthru; + NvU64 sysTimerOffsetNs; + GSP_VF_INFO gspVFInfo; +} GspSystemInfo; + +typedef struct rpc_os_error_log_v17_00 +{ + NvU32 exceptType; + NvU32 runlistId; + NvU32 chid; + char errString[0x100]; +} rpc_os_error_log_v17_00; + +typedef struct rpc_run_cpu_sequencer_v17_00 +{ + NvU32 bufferSizeDWord; + NvU32 cmdIndex; + NvU32 regSaveArea[8]; + NvU32 commandBuffer[]; +} rpc_run_cpu_sequencer_v17_00; + +typedef enum GSP_SEQ_BUF_OPCODE +{ + GSP_SEQ_BUF_OPCODE_REG_WRITE = 0, + GSP_SEQ_BUF_OPCODE_REG_MODIFY, + GSP_SEQ_BUF_OPCODE_REG_POLL, + GSP_SEQ_BUF_OPCODE_DELAY_US, + GSP_SEQ_BUF_OPCODE_REG_STORE, + GSP_SEQ_BUF_OPCODE_CORE_RESET, + GSP_SEQ_BUF_OPCODE_CORE_START, + GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, + 
GSP_SEQ_BUF_OPCODE_CORE_RESUME, +} GSP_SEQ_BUF_OPCODE; + +typedef struct +{ + NvU32 addr; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_WRITE; + +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY; + +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; + NvU32 timeout; + NvU32 error; +} GSP_SEQ_BUF_PAYLOAD_REG_POLL; + +typedef struct +{ + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_DELAY_US; + +typedef struct +{ + NvU32 addr; + NvU32 index; +} GSP_SEQ_BUF_PAYLOAD_REG_STORE; + +typedef struct GSP_SEQUENCER_BUFFER_CMD +{ + GSP_SEQ_BUF_OPCODE opCode; + union + { + GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite; + GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify; + GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll; + GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs; + GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore; + } payload; +} GSP_SEQUENCER_BUFFER_CMD; + +typedef struct +{ + // Magic + // BL to use for verification (i.e. Booter locked it in WPR2) + NvU64 magic; // = 0xdc3aae21371a60b3; + + // Revision number of Booter-BL-Sequencer handoff interface + // Bumped up when we change this interface so it is not backward compatible. + // Bumped up when we revoke GSP-RM ucode + NvU64 revision; // = 1; + + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + + NvU64 sysmemAddrOfRadix3Elf; + NvU64 sizeOfRadix3Elf; + + NvU64 sysmemAddrOfBootloader; + NvU64 sizeOfBootloader; + + // Offsets inside bootloader image needed by Booter + NvU64 bootloaderCodeOffset; + NvU64 bootloaderDataOffset; + NvU64 bootloaderManifestOffset; + + union + { + // Used only at initial boot + struct + { + NvU64 sysmemAddrOfSignature; + NvU64 sizeOfSignature; + }; + + // + // Used at suspend/resume to read GspFwHeapFreeList + // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart) + // + struct + { + NvU32 gspFwHeapFreeListWprOffset; + NvU32 unused0; + NvU64 unused1; + }; + }; + + // ---- Members describing FB layout -------------------------------- + NvU64 gspFwRsvdStart; + + NvU64 nonWprHeapOffset; + NvU64 nonWprHeapSize; + + NvU64 gspFwWprStart; + + // GSP-RM to use to setup heap. + NvU64 gspFwHeapOffset; + NvU64 gspFwHeapSize; + + // BL to use to find ELF for jump + NvU64 gspFwOffset; + // Size is sizeOfRadix3Elf above. + + NvU64 bootBinOffset; + // Size is sizeOfBootloader above. + + NvU64 frtsOffset; + NvU64 frtsSize; + + NvU64 gspFwWprEnd; + + // GSP-RM to use for fbRegionInfo? + NvU64 fbSize; + + // ---- Other members ----------------------------------------------- + + // GSP-RM to use for fbRegionInfo? + NvU64 vgaWorkspaceOffset; + NvU64 vgaWorkspaceSize; + + // Boot count. Used to determine whether to load the firmware image. + NvU64 bootCount; + + // This union is organized the way it is to start at an 8-byte boundary and achieve natural + // packing of the internal struct fields. + union + { + struct + { + // TODO: the partitionRpc* fields below do not really belong in this + // structure. The values are patched in by the partition bootstrapper + // when GSP-RM is booted in a partition, and this structure was a + // convenient place for the bootstrapper to access them. These should + // be moved to a different comm. mechanism between the bootstrapper + // and the GSP-RM tasks. + + // Shared partition RPC memory (physical address) + NvU64 partitionRpcAddr; + + // Offsets relative to partitionRpcAddr + NvU16 partitionRpcRequestOffset; + NvU16 partitionRpcReplyOffset; + + // Code section and dataSection offset and size. 
+ NvU32 elfCodeOffset; + NvU32 elfDataOffset; + NvU32 elfCodeSize; + NvU32 elfDataSize; + + // Used during GSP-RM resume to check for revocation + NvU32 lsUcodeVersion; + }; + + struct + { + // Pad for the partitionRpc* fields, plus 4 bytes + NvU32 partitionRpcPadding[4]; + + // CrashCat (contiguous) buffer size/location - occupies same bytes as the + // elf(Code|Data)(Offset|Size) fields above. + // TODO: move to GSP_FMC_INIT_PARAMS + NvU64 sysmemAddrOfCrashReportQueue; + NvU32 sizeOfCrashReportQueue; + + // Pad for the lsUcodeVersion field + NvU32 lsUcodeVersionPadding[1]; + }; + }; + + // Number of VF partitions allocating sub-heaps from the WPR heap + // Used during boot to ensure the heap is adequately sized + NvU8 gspFwHeapVfPartitionCount; + + // Pad structure to exactly 256 bytes. Can replace padding with additional + // fields without incrementing revision. Padding initialized to 0. + NvU8 padding[7]; + + // BL to use for verification (i.e. Booter says OK to boot) + NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified +} GspFwWprMeta; + +#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL + +#define GSP_FW_WPR_META_REVISION 1 + +typedef struct +{ + NvU32 version; // queue version + NvU32 size; // bytes, page aligned + NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum + NvU32 msgCount; // number of entries in queue + NvU32 writePtr; // message id of next slot + NvU32 flags; // if set it means "i want to swap RX" + NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store. + NvU32 entryOff; // Offset of entries from start of backing store. +} msgqTxHeader; + +typedef struct +{ + NvU32 readPtr; // message id of last message read +} msgqRxHeader; + +typedef struct { + RmPhysAddr sharedMemPhysAddr; + NvU32 pageTableEntryCount; + NvLength cmdQueueOffset; + NvLength statQueueOffset; + NvLength locklessCmdQueueOffset; + NvLength locklessStatQueueOffset; +} MESSAGE_QUEUE_INIT_ARGUMENTS; + +typedef struct { + NvU32 oldLevel; + NvU32 flags; + NvBool bInPMTransition; +} GSP_SR_INIT_ARGUMENTS; + +typedef struct +{ + MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; + GSP_SR_INIT_ARGUMENTS srInitArguments; + NvU32 gpuInstance; + + struct + { + NvU64 pa; + NvU64 size; + } profilerArgs; +} GSP_ARGUMENTS_CACHED; + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U) + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U) + +typedef NvU64 LibosAddress; + +typedef struct +{ + LibosAddress id8; // Id tag. + LibosAddress pa; // Physical address. + LibosAddress size; // Size of memory area. + NvU8 kind; // See LibosMemoryRegionKind above. + NvU8 loc; // See LibosMemoryRegionLoc above. +} LibosMemoryRegionInitArgument; + +typedef enum { + LIBOS_MEMORY_REGION_NONE, + LIBOS_MEMORY_REGION_CONTIGUOUS, + LIBOS_MEMORY_REGION_RADIX3 +} LibosMemoryRegionKind; + +typedef enum { + LIBOS_MEMORY_REGION_LOC_NONE, + LIBOS_MEMORY_REGION_LOC_SYSMEM, + LIBOS_MEMORY_REGION_LOC_FB +} LibosMemoryRegionLoc; + +typedef struct +{ + // + // Magic + // Use for verification by Booter + // + NvU64 magic; // = GSP_FW_SR_META_MAGIC; + + // + // Revision number + // Bumped up when we change this interface so it is not backward compatible. 
+ // Bumped up when we revoke GSP-RM ucode + // + NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION; + + // + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + // + NvU64 sysmemAddrOfSuspendResumeData; + NvU64 sizeOfSuspendResumeData; + + // ---- Members for crypto ops across S/R --------------------------- + + // + // HMAC over the entire GspFwSRMeta structure (including padding) + // with the hmac field itself zeroed. + // + NvU8 hmac[32]; + + // Hash over GspFwWprMeta structure + NvU8 wprMetaHash[32]; + + // Hash over GspFwHeapFreeList structure. All zeros signifies no free list. + NvU8 heapFreeListHash[32]; + + // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details) + NvU8 dataHash[32]; + + // + // Pad structure to exactly 256 bytes (1 DMA chunk). + // Padding initialized to zero. + // + NvU32 padding[24]; + +} GspFwSRMeta; + +#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL + +#define GSP_FW_SR_META_REVISION 2 + +#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \ + ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_START */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \ + 0) + +typedef struct { + // + // Version 1 + // Version 2 + // Version 3 = for Partition boot + // Version 4 = for eb riscv boot + // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later. + // + NvU32 version; // structure version + NvU32 bootloaderOffset; + NvU32 bootloaderSize; + NvU32 bootloaderParamOffset; + NvU32 bootloaderParamSize; + NvU32 riscvElfOffset; + NvU32 riscvElfSize; + NvU32 appVersion; // Changelist number associated with the image + // + // Manifest contains information about Monitor and it is + // input to BR + // + NvU32 manifestOffset; + NvU32 manifestSize; + // + // Monitor Data offset within RISCV image and size + // + NvU32 monitorDataOffset; + NvU32 monitorDataSize; + // + // Monitor Code offset withtin RISCV image and size + // + NvU32 monitorCodeOffset; + NvU32 monitorCodeSize; + NvU32 bIsMonitorEnabled; + // + // Swbrom Code offset within RISCV image and size + // + NvU32 swbromCodeOffset; + NvU32 swbromCodeSize; + // + // Swbrom Data offset within RISCV image and size + // + NvU32 swbromDataOffset; + NvU32 swbromDataSize; + // + // Total size of FB carveout (image and reserved space). + // + NvU32 fbReservedSize; + // + // Indicates whether the entire RISC-V image is signed as "code" in code section. 
+ // + NvU32 bSignedAsCode; +} RM_RISCV_UCODE_DESC; + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY { + NvU16 engineIdx; + NvU32 pmcIntrMask; + NvU32 vectorStall; + NvU32 vectorNonStall; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY; + +typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP { + NvU8 subtreeStart; + NvU8 subtreeEnd; +} NV2080_INTR_CATEGORY_SUBTREE_MAP; + +#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128 + +typedef enum NV2080_INTR_CATEGORY { + NV2080_INTR_CATEGORY_DEFAULT = 0, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2, + NV2080_INTR_CATEGORY_RUNLIST = 3, + NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4, + NV2080_INTR_CATEGORY_UVM_OWNED = 5, + NV2080_INTR_CATEGORY_UVM_SHARED = 6, + NV2080_INTR_CATEGORY_ENUM_COUNT = 7, +} NV2080_INTR_CATEGORY; + +#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS { + NvU32 tableLen; + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE]; + NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT]; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS; + +#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures + +#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels + +typedef union rpc_message_rpc_union_field_v03_00 +{ + NvU32 spare; + NvU32 cpuRmGfid; +} rpc_message_rpc_union_field_v03_00; + +typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v; + +typedef struct rpc_message_header_v03_00 +{ + NvU32 header_version; + NvU32 signature; + NvU32 length; + NvU32 function; + NvU32 rpc_result; + NvU32 rpc_result_private; + NvU32 sequence; + rpc_message_rpc_union_field_v u; + rpc_generic_union rpc_message_data[]; +} rpc_message_header_v03_00; + +typedef rpc_message_header_v03_00 rpc_message_header_v; + +typedef struct GSP_MSG_QUEUE_ELEMENT +{ + NvU8 authTagBuffer[16]; // Authentication tag buffer. + NvU8 aadBuffer[16]; // AAD buffer. + NvU32 checkSum; // Set to value needed to make checksum always zero. + NvU32 seqNum; // Sequence number maintained by the message queue. + NvU32 elemCount; // Number of message queue elements this message has. + NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8); +} GSP_MSG_QUEUE_ELEMENT; + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20) + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u) +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h new file mode 100644 index 000000000000..642c13aec325 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_MSGFN_H__ +#define __NVRM_MSGFN_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#ifndef E +# define E(RPC) NV_VGPU_MSG_EVENT_##RPC, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT = 0x1000) // 0x1000 + E(GSP_INIT_DONE) // 0x1001 + E(GSP_RUN_CPU_SEQUENCER) // 0x1002 + E(POST_EVENT) // 0x1003 + E(RC_TRIGGERED) // 0x1004 + E(MMU_FAULT_QUEUED) // 0x1005 + E(OS_ERROR_LOG) // 0x1006 + E(RG_LINE_INTR) // 0x1007 + E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008 + E(SIM_READ) // 0x1009 + E(SIM_WRITE) // 0x100a + E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b + E(UCODE_LIBOS_PRINT) // 0x100c + E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e + E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f + E(VGPU_CONFIG) // 0x1010 + E(DISPLAY_MODESET) // 0x1011 + E(EXTDEV_INTR_SERVICE) // 0x1012 + E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013 + E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014 + E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015 + E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016 + E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017 + E(TIMED_SEMAPHORE_RELEASE) // 0x1018 + E(NVLINK_IS_GPU_DEGRADED) // 0x1019 + E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a + E(GSP_SEND_USER_SHARED_DATA) // 0x101b + E(NVLINK_FAULT_UP) // 0x101c + E(GSP_LOCKDOWN_NOTICE) // 0x101d + E(MIG_CI_CONFIG_UPDATE) // 0x101e + E(NUM_EVENTS) // END +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h new file mode 100644 index 000000000000..3a04e702677f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_NVDEC_H__ +#define __NVRM_NVDEC_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; + NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2 +} NV_BSP_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h new file mode 100644 index 000000000000..203c1d5304d9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_NVENC_H__ +#define __NVRM_NVENC_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC? + NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2 +} NV_MSENC_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h new file mode 100644 index 000000000000..71fc53889ec7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_NVJPG_H__ +#define __NVRM_NVJPG_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG? + NvU32 engineInstance; +} NV_NVJPG_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h new file mode 100644 index 000000000000..49d81c7673d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_OFA_H__ +#define __NVRM_OFA_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? +} NV_OFA_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h new file mode 100644 index 000000000000..2a037acc6b1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_RPCFN_H__ +#define __NVRM_RPCFN_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#ifndef X +# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP) // 0 + X(RM, SET_GUEST_SYSTEM_INFO) // 1 + X(RM, ALLOC_ROOT) // 2 + X(RM, ALLOC_DEVICE) // 3 deprecated + X(RM, ALLOC_MEMORY) // 4 + X(RM, ALLOC_CTX_DMA) // 5 + X(RM, ALLOC_CHANNEL_DMA) // 6 + X(RM, MAP_MEMORY) // 7 + X(RM, BIND_CTX_DMA) // 8 deprecated + X(RM, ALLOC_OBJECT) // 9 + X(RM, FREE) //10 + X(RM, LOG) //11 + X(RM, ALLOC_VIDMEM) //12 + X(RM, UNMAP_MEMORY) //13 + X(RM, MAP_MEMORY_DMA) //14 + X(RM, UNMAP_MEMORY_DMA) //15 + X(RM, GET_EDID) //16 + X(RM, ALLOC_DISP_CHANNEL) //17 + X(RM, ALLOC_DISP_OBJECT) //18 + X(RM, ALLOC_SUBDEVICE) //19 + X(RM, ALLOC_DYNAMIC_MEMORY) //20 + X(RM, DUP_OBJECT) //21 + X(RM, IDLE_CHANNELS) //22 + X(RM, ALLOC_EVENT) //23 + X(RM, SEND_EVENT) //24 + X(RM, REMAPPER_CONTROL) //25 deprecated + X(RM, DMA_CONTROL) //26 + X(RM, DMA_FILL_PTE_MEM) //27 + X(RM, MANAGE_HW_RESOURCE) //28 + X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated + X(RM, CREATE_FB_SEGMENT) //30 + X(RM, DESTROY_FB_SEGMENT) //31 + X(RM, ALLOC_SHARE_DEVICE) //32 + X(RM, DEFERRED_API_CONTROL) //33 + X(RM, REMOVE_DEFERRED_API) //34 + X(RM, SIM_ESCAPE_READ) //35 + X(RM, SIM_ESCAPE_WRITE) //36 + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37 + X(RM, FREE_VIDMEM_VIRT) //38 + X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP + X(RM, PERF_GET_PERFMON_SAMPLE) //40 + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated + X(RM, PERF_GET_LEVEL_INFO) //42 + X(RM, MAP_SEMA_MEMORY) //43 + X(RM, UNMAP_SEMA_MEMORY) //44 + X(RM, SET_SURFACE_PROPERTIES) //45 + X(RM, CLEANUP_SURFACE) //46 + X(RM, UNLOADING_GUEST_DRIVER) //47 + X(RM, TDR_SET_TIMEOUT_STATE) //48 + X(RM, SWITCH_TO_VGA) //49 + X(RM, GPU_EXEC_REG_OPS) //50 + X(RM, GET_STATIC_INFO) //51 + X(RM, ALLOC_VIRTMEM) //52 + X(RM, UPDATE_PDE_2) //53 + X(RM, SET_PAGE_DIRECTORY) //54 + X(RM, 
GET_STATIC_PSTATE_INFO) //55 + X(RM, TRANSLATE_GUEST_GPU_PTES) //56 + X(RM, RESERVED_57) //57 + X(RM, RESET_CURRENT_GR_CONTEXT) //58 + X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59 + X(RM, GET_ENGINE_UTILIZATION) //60 + X(RM, UPDATE_GPU_PDES) //61 + X(RM, GET_ENCODER_CAPACITY) //62 + X(RM, VGPU_PF_REG_READ32) //63 + X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64 + X(GSP, GET_GSP_STATIC_INFO) //65 + X(RM, RMFS_INIT) //66 + X(RM, RMFS_CLOSE_QUEUE) //67 + X(RM, RMFS_CLEANUP) //68 + X(RM, RMFS_TEST) //69 + X(RM, UPDATE_BAR_PDE) //70 + X(RM, CONTINUATION_RECORD) //71 + X(RM, GSP_SET_SYSTEM_INFO) //72 + X(RM, SET_REGISTRY) //73 + X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated + X(GSP, GSP_RM_CONTROL) //76 + X(RM, GET_STATIC_INFO2) //77 + X(RM, DUMP_PROTOBUF_COMPONENT) //78 + X(RM, UNSET_PAGE_DIRECTORY) //79 + X(RM, GET_CONSOLIDATED_STATIC_INFO) //80 + X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE) //85 + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86 + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87 + X(RM, CTRL_RESET_CHANNEL) //88 + X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89 + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90 + X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91 + X(RM, CTRL_PERF_BOOST) //92 + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94 + X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95 + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96 + X(RM, CTRL_GPFIFO_SCHEDULE) //97 + X(RM, CTRL_SET_TIMESLICE) //98 + X(RM, CTRL_PREEMPT) //99 + X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100 + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101 + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102 + X(GSP, GSP_RM_ALLOC) //103 + X(RM, CTRL_GET_P2P_CAPS_V2) //104 + X(RM, CTRL_CIPHER_AES_ENCRYPT) //105 + X(RM, CTRL_CIPHER_SESSION_KEY) //106 + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107 + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108 + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109 + X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110 + X(RM, CTRL_GPU_PROMOTE_CTX) //111 + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112 + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113 + X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114 + X(RM, CTRL_GPU_INITIALIZE_CTX) //115 + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116 + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117 + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118 + X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119 + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120 + X(RM, CTRL_GET_CE_PCE_MASK) //121 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122 + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123 + X(RM, CTRL_GET_NVLINK_STATUS) //124 + X(RM, CTRL_GET_P2P_CAPS) //125 + X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126 + X(RM, RESERVED_0) //127 + X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128 + X(RM, CTRL_RESERVE_HWPM_LEGACY) //129 + X(RM, CTRL_B0CC_EXEC_REG_OPS) //130 + X(RM, CTRL_BIND_PM_RESOURCES) //131 + X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132 + X(RM, CTRL_DBG_RESUME_CONTEXT) //133 + X(RM, CTRL_DBG_EXEC_REG_OPS) //134 + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135 + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136 + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137 + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138 + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139 + X(RM, CTRL_ALLOC_PMA_STREAM) //140 + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141 + X(RM, CTRL_FB_GET_INFO_V2) //142 + X(RM, 
CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143 + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144 + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145 + X(RM, CTRL_GPU_EVICT_CTX) //146 + X(RM, CTRL_FB_GET_FS_INFO) //147 + X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148 + X(RM, CTRL_STOP_CHANNEL) //149 + X(RM, CTRL_GR_PC_SAMPLING_MODE) //150 + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151 + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152 + X(RM, CTRL_FREE_PMA_STREAM) //153 + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154 + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155 + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156 + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157 + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158 + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159 + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160 + X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161 + X(UVM, UVM_PAGING_CHANNEL_MAP) //162 + X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163 + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164 + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165 + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166 + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167 + X(RM, DCE_RM_INIT) //168 + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169 + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170 + X(RM, GET_PLCABLE_ADDRESS_KIND) //171 + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172 + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173 + X(RM, CTRL_GET_MMU_DEBUG_MODE) //174 + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177 + X(RM, DISABLE_CHANNELS) //178 + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179 + X(RM, CTRL_FABRIC_MEM_STATS) //180 + X(RM, SAVE_HIBERNATION_DATA) //181 + X(RM, RESTORE_HIBERNATION_DATA) //182 + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183 + X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184 + X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185 + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186 + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187 + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188 + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189 + X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190 + X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191 + X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192 + X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193 + X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194 + X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195 + X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196 + X(RM, CTRL_GET_HS_CREDITS) // 197 + X(RM, CTRL_SET_HS_CREDITS) // 198 + X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199 + X(RM, INVALIDATE_TLB) // 200 + X(RM, RESERVED_201) // 201 + X(RM, ECC_NOTIFIER_WRITE_ACK) // 202 + X(RM, NUM_FUNCTIONS) //END +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h new file mode 100644 index 000000000000..f6ec04efd119 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_VMM_H__ +#define __NVRM_VMM_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define FERMI_VASPACE_A (0x000090f1) + +typedef struct +{ + NvU32 index; + NvV32 flags; + NvU64 vaSize NV_ALIGN_BYTES(8); + NvU64 vaStartInternal NV_ALIGN_BYTES(8); + NvU64 vaLimitInternal NV_ALIGN_BYTES(8); + NvU32 bigPageSize; + NvU64 vaBase NV_ALIGN_BYTES(8); +} NV_VASPACE_ALLOCATION_PARAMETERS; + +#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default + +#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3) + +#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB +#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB + +#define GMMU_FMT_MAX_LEVELS 6U + +#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */ +typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] Page size (VA coverage) of the level to reserve. + * This need not be a leaf (page table) page size - it can be + * the coverage of an arbitrary level (including root page directory). + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] First GPU virtual address of the range to reserve. + * This must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); + + /*! + * [in] Last GPU virtual address of the range to reserve. + * This (+1) must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); + + /*! + * [in] Number of PDE levels to copy. + */ + NvU32 numLevelsToCopy; + + /*! + * [in] Per-level information. + */ + struct { + /*! + * Physical address of this page level instance. + */ + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + + /*! + * Size in bytes allocated for this level instance. + */ + NV_DECLARE_ALIGNED(NvU64 size, 8); + + /*! + * Aperture in which this page level instance resides. + */ + NvU32 aperture; + + /*! 
+ * Page shift corresponding to the level + */ + NvU8 pageShift; + } levels[GMMU_FMT_MAX_LEVELS]; +} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS; + +#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ +ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + NvU32 numEntries; + NvU32 flags; + NvHandle hVASpace; + NvU32 chId; + NvU32 subDeviceId; // ID+1, 0 for BC + NvU32 pasid; +} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS; + +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U) + +#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS { + NvHandle hVASpace; + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c index 7bfa6240d283..2156808cba4f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c @@ -19,28 +19,26 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "gf100.h" +#include <rm/engine.h> -#include <subdev/gsp.h> +#include "nvrm/ofa.h" -#include <nvif/class.h> +static int +r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *ofa) +{ + NV_OFA_ALLOCATION_PARAMETERS *args; -static const struct gf100_gr_func -ad102_gr = { - .sclass = { - { -1, -1, FERMI_TWOD_A }, - { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, - { -1, -1, ADA_A }, - { -1, -1, ADA_COMPUTE_A }, - {} - } -}; + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); -int -ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_gr_new(&ad102_gr, device, type, inst, pgr); + args->size = sizeof(*args); - return -ENODEV; + return nvkm_gsp_rm_alloc_wr(ofa, args); } + +const struct nvkm_rm_api_engine +r535_ofa = { + .alloc = r535_ofa_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c new file mode 100644 index 000000000000..a4190676e1ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include <rm/rm.h> + +#include "nvrm/gsp.h" + +static const struct nvkm_rm_wpr +r535_wpr_libos2 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB, +}; + +static const struct nvkm_rm_wpr +r535_wpr_libos3 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, +}; + +static const struct nvkm_rm_api +r535_api = { + .gsp = &r535_gsp, + .rpc = &r535_rpc, + .ctrl = &r535_ctrl, + .alloc = &r535_alloc, + .client = &r535_client, + .device = &r535_device, + .fbsr = &r535_fbsr, + .disp = &r535_disp, + .fifo = &r535_fifo, + .ce = &r535_ce, + .gr = &r535_gr, + .nvdec = &r535_nvdec, + .nvenc = &r535_nvenc, + .nvjpg = &r535_nvjpg, + .ofa = &r535_ofa, +}; + +const struct nvkm_rm_impl +r535_rm_tu102 = { + .wpr = &r535_wpr_libos2, + .api = &r535_api, +}; + +const struct nvkm_rm_impl +r535_rm_ga102 = { + .wpr = &r535_wpr_libos3, + .api = &r535_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c new file mode 100644 index 000000000000..9d06ff722fea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -0,0 +1,698 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits a GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ *   the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the
+ *   metadata of the RPC, e.g. the RPC function number.
+ *
+ * - The payload, where the RPC message itself is stored, e.g. the params
+ *   of a specific RPC function. Some RPC functions also have their
+ *   headers in the payload, e.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ *	+------------------------+
+ *	| Message Element Header |
+ *	| (r535_gsp_msg)         |
+ *	|                        |
+ *	| (r535_gsp_msg.data)    |
+ *	|          |             |
+ *	|----------V-------------|
+ *	| GSP RPC Header         |
+ *	| (nvfw_gsp_rpc)         |
+ *	|                        |
+ *	| (nvfw_gsp_rpc.data)    |
+ *	|          |             |
+ *	|----------V-------------|
+ *	| Payload                |
+ *	|                        |
+ *	| header(optional)       |
+ *	| params                 |
+ *	+------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message must be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element has the expected function
+ * number, while the rest of the elements are sent with the function
+ * number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is likewise formed of split elements.
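+ *
+ * As a worked example (illustrative sizes, assuming 4KiB GSP pages and
+ * therefore a 64KiB maximum element): an RPC whose header and payload
+ * total 100KiB is sent as two elements - the first carries the real
+ * function number plus as much of the payload as fits, and the second
+ * is a CONTINUATION_RECORD carrying the remaining ~36KiB.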
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ *   payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
+
+struct r535_gsp_msg {
+	u8 auth_tag_buffer[16];
+	u8 aad_buffer[16];
+	u32 checksum;
+	u32 sequence;
+	u32 elem_count;
+	u32 pad;
+	u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+	u32 header_version;
+	u32 signature;
+	u32 length;
+	u32 function;
+	u32 rpc_result;
+	u32 rpc_result_private;
+	u32 sequence;
+	union {
+		u32 spare;
+		u32 cpuRmGfid;
+	};
+	u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
+#define to_gsp_hdr(p, header) \
+	container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+	container_of((void *)p, typeof(*header), params)
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+	switch (rpc_status) {
+	case 0x55: /* NV_ERR_NOT_READY */
+	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+		return -EBUSY;
+	case 0x51: /* NV_ERR_NO_MEMORY */
+		return -ENOMEM;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+	u32 size, rptr = *gsp->msgq.rptr;
+	int used;
+
+	size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+			    GSP_PAGE_SIZE);
+	if (WARN_ON(!size || size >= gsp->msgq.cnt))
+		return -EINVAL;
+
+	do {
+		u32 wptr = *gsp->msgq.wptr;
+
+		used = wptr + gsp->msgq.cnt - rptr;
+		if (used >= gsp->msgq.cnt)
+			used -= gsp->msgq.cnt;
+		if (used >= size)
+			break;
+
+		usleep_range(1, 2);
+	} while (--(*ptime));
+
+	if (WARN_ON(!*ptime))
+		return -ETIMEDOUT;
+
+	return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+	u32 rptr = *gsp->msgq.rptr;
+
+	/* Skip the first page, which is the message queue info */
+	return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+			rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ *   Peek the first page of the element to determine the total size of the
+ *   message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ *   Once the total size of the message is determined from the GSP message
+ *   queue element, the caller of r535_gsp_msgq_recv() allocates the
+ *   required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ *   Copy the message into the allocated memory. Advance the read pointer.
+ *   If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ *   r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ *   until the complete message is received.
+ *   r535_gsp_msgq_recv() assembles the payloads of continuation parts into
+ *   the return of the large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ *   The user is responsible for freeing the memory allocated for the GSP
+ *   message pages after they have been processed.
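+ *
+ * A minimal sketch of that sequence, modelled on the call sites later in
+ * this file (the retry budget is the value used by r535_gsp_msg_recv(),
+ * and gsp_rpc_len is the expected reply size)::
+ *
+ *	int retries = 4000000;
+ *	struct nvfw_gsp_rpc *rpc;
+ *
+ *	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ *	if (IS_ERR_OR_NULL(rpc))
+ *		return rpc;
+ *
+ *	rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ *	if (IS_ERR_OR_NULL(rpc))
+ *		return rpc;
+ *
+ *	r535_gsp_msg_done(gsp, rpc);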
+ */
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+	struct r535_gsp_msg *mqe;
+	int ret;
+
+	ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	mqe = r535_gsp_msgq_get_entry(gsp);
+
+	return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+	int *retries;
+	u32 gsp_rpc_len;
+	void *gsp_rpc_buf;
+	bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+			    struct r535_gsp_msg_info *info)
+{
+	u8 *buf = info->gsp_rpc_buf;
+	u32 rptr = *gsp->msgq.rptr;
+	struct r535_gsp_msg *mqe;
+	u32 size, expected, len;
+	int ret;
+
+	expected = info->gsp_rpc_len;
+
+	ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	mqe = r535_gsp_msgq_get_entry(gsp);
+
+	if (info->continuation) {
+		struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+		if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+			nvkm_error(&gsp->subdev,
+				   "Not a continuation of a large RPC\n");
+			r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+			return ERR_PTR(-EIO);
+		}
+	}
+
+	size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+	len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+	len = min_t(u32, expected, len);
+
+	if (info->continuation)
+		memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+		       len - sizeof(struct nvfw_gsp_rpc));
+	else
+		memcpy(buf, mqe->data, len);
+
+	expected -= len;
+
+	if (expected) {
+		/* Wrap around to the first element page of the queue. */
+		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+		memcpy(buf + len, mqe, expected);
+	}
+
+	rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+	mb();
+	(*gsp->msgq.rptr) = rptr;
+	return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+	struct r535_gsp_msg *mqe;
+	const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+	struct nvfw_gsp_rpc *rpc;
+	struct r535_gsp_msg_info info = {0};
+	u32 expected = gsp_rpc_len;
+	void *buf;
+
+	mqe = r535_gsp_msgq_get_entry(gsp);
+	rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+	if (WARN_ON(rpc->length > max_rpc_size))
+		return NULL;
+
+	buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	info.gsp_rpc_buf = buf;
+	info.retries = retries;
+	info.gsp_rpc_len = rpc->length;
+
+	buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+	if (IS_ERR(buf)) {
+		kvfree(info.gsp_rpc_buf);
+		info.gsp_rpc_buf = NULL;
+		return buf;
+	}
+
+	if (expected <= max_rpc_size)
+		return buf;
+
+	info.gsp_rpc_buf += info.gsp_rpc_len;
+	expected -= info.gsp_rpc_len;
+
+	while (expected) {
+		u32 size;
+
+		rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+		if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+			return rpc;
+		}
+
+		info.gsp_rpc_len = rpc->length;
+		info.continuation = true;
+
+		rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+		if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+			return rpc;
+		}
+
+		size = info.gsp_rpc_len - sizeof(*rpc);
+		expected -= size;
+		info.gsp_rpc_buf += size;
+	}
+
+	rpc = buf;
+	rpc->length = gsp_rpc_len;
+	return buf;
+}
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+	struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+	struct r535_gsp_msg *cqe;
+	/* The RPC length was stashed in the checksum field by
+	 * r535_gsp_cmdq_get(); the real checksum is filled in below. */
+	u32 gsp_rpc_len = msg->checksum;
+	u64 *ptr = (void *)msg;
+	u64 *end;
+	u64 csum = 0;
+	int free, time = 1000000;
+	u32 wptr, size, step, len;
+	u32 off = 0;
+
+	len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len,
		    GSP_PAGE_SIZE);
+
+	end = (u64 *)((char *)ptr + len);
+	msg->pad = 0;
+	msg->checksum = 0;
+	msg->sequence = gsp->cmdq.seq++;
+	msg->elem_count = DIV_ROUND_UP(len, 0x1000);
+
+	while (ptr < end)
+		csum ^= *ptr++;
+
+	msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+	wptr = *gsp->cmdq.wptr;
+	do {
+		do {
+			free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+			if (free >= gsp->cmdq.cnt)
+				free -= gsp->cmdq.cnt;
+			if (free >= 1)
+				break;
+
+			usleep_range(1, 2);
+		} while (--time);
+
+		if (WARN_ON(!time)) {
+			kvfree(msg);
+			return -ETIMEDOUT;
+		}
+
+		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+		step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+		size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+		memcpy(cqe, (u8 *)msg + off, size);
+
+		wptr += DIV_ROUND_UP(size, 0x1000);
+		if (wptr == gsp->cmdq.cnt)
+			wptr = 0;
+
+		off += size;
+		len -= size;
+	} while (len);
+
+	nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+	wmb();
+	(*gsp->cmdq.wptr) = wptr;
+	mb();
+
+	nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+	kvfree(msg);
+	return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+	struct r535_gsp_msg *msg;
+	u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+	size = ALIGN(size, GSP_MSG_MIN_SIZE);
+	msg = kvzalloc(size, GFP_KERNEL);
+	if (!msg)
+		return ERR_PTR(-ENOMEM);
+
+	/* Stash the RPC length in the checksum field; r535_gsp_cmdq_push()
+	 * reads it back before computing the real checksum. */
+	msg->checksum = gsp_rpc_len;
+	return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+	kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+	if (gsp->subdev.debug >= lvl) {
+		nvkm_printk__(&gsp->subdev, lvl, info,
+			      "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+			      msg->function, msg->length, msg->length - sizeof(*msg),
+			      msg->rpc_result, msg->rpc_result_private);
+		print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+			       msg->data, msg->length - sizeof(*msg), true);
+	}
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+	struct nvkm_subdev *subdev = &gsp->subdev;
+	struct nvfw_gsp_rpc *rpc;
+	int retries = 4000000, i;
+
+retry:
+	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+	if (IS_ERR_OR_NULL(rpc))
+		return rpc;
+
+	rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+	if (IS_ERR_OR_NULL(rpc))
+		return rpc;
+
+	if (rpc->rpc_result) {
+		r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+		r535_gsp_msg_done(gsp, rpc);
+		return ERR_PTR(-EINVAL);
+	}
+
+	r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+	if (fn && rpc->function == fn) {
+		if (gsp_rpc_len) {
+			if (rpc->length < gsp_rpc_len) {
+				nvkm_error(subdev, "rpc len %d < %d\n",
+					   rpc->length, gsp_rpc_len);
+				r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+				r535_gsp_msg_done(gsp, rpc);
+				return ERR_PTR(-EIO);
+			}
+
+			return rpc;
+		}
+
+		r535_gsp_msg_done(gsp, rpc);
+		return NULL;
+	}
+
+	for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+		struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+		if (ntfy->fn == rpc->function) {
+			if (ntfy->func)
+				ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+					   rpc->length - sizeof(*rpc));
+			break;
+		}
+	}
+
+	if (i == gsp->msgq.ntfy_nr)
+		r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+	r535_gsp_msg_done(gsp, rpc);
+	if (fn)
+		goto retry;
+
+	if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+		goto retry;
+
+	return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+	int ret = 0;
+
+	mutex_lock(&gsp->msgq.mutex);
+	if (WARN_ON(gsp->msgq.ntfy_nr >=
ARRAY_SIZE(gsp->msgq.ntfy))) { + ret = -ENOSPC; + } else { + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; + gsp->msgq.ntfy_nr++; + } + mutex_unlock(&gsp->msgq.mutex); + return ret; +} + +int +r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) +{ + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + repv = r535_gsp_msg_recv(gsp, fn, 0); + mutex_unlock(&gsp->cmdq.mutex); + if (IS_ERR(repv)) + return PTR_ERR(repv); + + return 0; +} + +static void * +r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, + enum nvkm_gsp_rpc_reply_policy policy, + u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *reply; + void *repv = NULL; + + switch (policy) { + case NVKM_GSP_RPC_REPLY_NOWAIT: + break; + case NVKM_GSP_RPC_REPLY_RECV: + reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); + if (!IS_ERR_OR_NULL(reply)) + repv = reply->data; + else + repv = reply; + break; + case NVKM_GSP_RPC_REPLY_POLL: + repv = r535_gsp_msg_recv(gsp, fn, 0); + break; + } + + return repv; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); + u32 fn = rpc->function; + int ret; + + if (gsp->subdev.debug >= NV_DBG_TRACE) { + nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, + rpc->length, rpc->length - sizeof(*rpc)); + print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, + rpc->data, rpc->length - sizeof(*rpc), true); + } + + ret = r535_gsp_cmdq_push(gsp, rpc); + if (ret) + return ERR_PTR(ret); + + return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len); +} + +static void +r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) +{ + struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); + + r535_gsp_msg_done(gsp, rpc); +} + +static void * +r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) +{ + struct nvfw_gsp_rpc *rpc; + + rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size, + sizeof(u64))); + if (IS_ERR(rpc)) + return ERR_CAST(rpc); + + rpc->header_version = 0x03000000; + rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; + rpc->function = fn; + rpc->rpc_result = 0xffffffff; + rpc->rpc_result_private = 0xffffffff; + rpc->length = sizeof(*rpc) + payload_size; + return rpc->data; +} + +static void * +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); + struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); + const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg); + const u32 max_payload_size = max_rpc_size - sizeof(*rpc); + u32 payload_size = rpc->length - sizeof(*rpc); + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + if (payload_size > max_payload_size) { + const u32 fn = rpc->function; + u32 remain_payload_size = payload_size; + void *next; + + /* Send initial RPC. */ + next = r535_gsp_rpc_get(gsp, fn, max_payload_size); + if (IS_ERR(next)) { + repv = next; + goto done; + } + + memcpy(next, payload, max_payload_size); + + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); + if (IS_ERR(repv)) + goto done; + + payload += max_payload_size; + remain_payload_size -= max_payload_size; + + /* Remaining chunks sent as CONTINUATION_RECORD RPCs. 
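+		 * Each chunk is pushed with NVKM_GSP_RPC_REPLY_NOWAIT; the
+		 * final reply for the whole RPC is collected once, below,
+		 * using the caller's reply policy.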
*/ + while (remain_payload_size) { + u32 size = min(remain_payload_size, + max_payload_size); + + next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); + if (IS_ERR(next)) { + repv = next; + goto done; + } + + memcpy(next, payload, size); + + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); + if (IS_ERR(repv)) + goto done; + + payload += size; + remain_payload_size -= size; + } + + /* Wait for reply. */ + repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + + sizeof(*rpc)); + if (!IS_ERR(repv)) + kvfree(msg); + } else { + repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); + } + +done: + mutex_unlock(&gsp->cmdq.mutex); + return repv; +} + +const struct nvkm_rm_api_rpc +r535_rpc = { + .get = r535_gsp_rpc_get, + .push = r535_gsp_rpc_push, + .done = r535_gsp_rpc_done, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index d3e95453f25d..f25ea610cd99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -19,15 +19,38 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include "vmm.h" +#include <subdev/mmu/vmm.h> -#include <nvrm/nvtypes.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h> -#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> +#include <nvhw/drf.h> +#include "nvrm/vmm.h" -static int -r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +void +r535_mmu_vaspace_del(struct nvkm_vmm *vmm) +{ + if (vmm->rm.external) { + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object, + NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY, + sizeof(*ctrl)); + if (!IS_ERR(ctrl)) { + ctrl->hVASpace = vmm->rm.object.handle; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl)); + } + + vmm->rm.external = false; + } + + nvkm_gsp_rm_free(&vmm->rm.object); + nvkm_gsp_device_dtor(&vmm->rm.device); + nvkm_gsp_client_dtor(&vmm->rm.client); + + nvkm_vmm_put(vmm, &vmm->rm.rsvd); +} + +int +r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external) { NV_VASPACE_ALLOCATION_PARAMETERS *args; int ret; @@ -37,58 +60,103 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm) if (ret) return ret; - args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A, sizeof(*args), &vmm->rm.object); if (IS_ERR(args)) return PTR_ERR(args); args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + if (external) + args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED; ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); if (ret) return ret; - { + if (!external) { NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; + u8 page_shift = 29; /* 512MiB */ + const u64 page_size = BIT_ULL(page_shift); + const struct nvkm_vmm_page *page; + const struct nvkm_vmm_desc *desc; + struct nvkm_vmm_pt *pd = vmm->pd; + + for (page = vmm->func->page; page->shift; page++) { + if (page->shift == page_shift) + break; + } + + if (WARN_ON(!page->shift)) + return -EINVAL; mutex_lock(&vmm->mutex.vmm); - ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, + ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size, &vmm->rm.rsvd); mutex_unlock(&vmm->mutex.vmm); if (ret) return ret; + /* Some parts of RM expect the 
server-reserved area to be in a specific location. */ + if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START || + vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE)) + return -EINVAL; + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, sizeof(*ctrl)); if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - ctrl->pageSize = 0x20000000; + ctrl->pageSize = page_size; ctrl->virtAddrLo = vmm->rm.rsvd->addr; ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; - ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2; - ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; - ctrl->levels[0].size = 0x20; - ctrl->levels[0].aperture = 1; - ctrl->levels[0].pageShift = 0x2f; - ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; - ctrl->levels[1].size = 0x1000; - ctrl->levels[1].aperture = 1; - ctrl->levels[1].pageShift = 0x26; - if (vmm->pd->pde[0]->pde[0]) { - ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; - ctrl->levels[2].size = 0x1000; - ctrl->levels[2].aperture = 1; - ctrl->levels[2].pageShift = 0x1d; + + for (desc = page->desc; desc->bits; desc++) { + ctrl->numLevelsToCopy++; + page_shift += desc->bits; + } + desc--; + + for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) { + page_shift -= desc->bits; + + ctrl->levels[i].physAddress = pd->pt[0]->addr; + ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size; + ctrl->levels[i].aperture = 1; + ctrl->levels[i].pageShift = page_shift; + + pd = pd->pde[0]; } ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); + } else { + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object, + NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->physAddress = vmm->pd->pt[0]->addr; + ctrl->numEntries = 1 << vmm->func->page[0].desc->bits; + ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM); + ctrl->hVASpace = vmm->rm.object.handle; + + ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl); + if (ret == 0) + vmm->rm.external = true; } return ret; } +static int +r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +{ + return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true); +} + static void r535_mmu_dtor(struct nvkm_mmu *mmu) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild new file mode 100644 index 000000000000..5db0e7009e1f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o +nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o +nvkm-y += nvkm/subdev/gsp/rm/r570/client.o +nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o +nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o +nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o +nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o +nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c new file mode 100644 index 000000000000..87e6240662ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include <rm/rm.h> + +#include "nvrm/client.h" + +static int +r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle) +{ + NV0000_ALLOC_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClient = client->object.handle; + args->processID = ~0; + + return nvkm_gsp_rm_alloc_wr(&client->object, args); +} + +const struct nvkm_rm_api_client +r570_client = { + .ctor = r570_gsp_client_ctor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c new file mode 100644 index 000000000000..a96e31c2d80b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include <rm/rm.h> + +#include <engine/disp.h> +#include <engine/disp/outp.h> + +#include "nvhw/drf.h" + +#include "nvrm/disp.h" + +static int +r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *dmac) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass, + sizeof(*args), dmac); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = inst; + args->offset = put_offset; + args->subDeviceId = BIT(0); + + return nvkm_gsp_rm_alloc_wr(dmac, args); +} + +static int +r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory) +{ + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + if (memory) { + switch (nvkm_memory_target(memory)) { + case NVKM_MEM_TARGET_NCOH: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 0; + ctrl->pbTargetAperture = PHYS_PCI; + break; + case NVKM_MEM_TARGET_HOST: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 1; + ctrl->pbTargetAperture = PHYS_PCI_COHERENT; + break; + case NVKM_MEM_TARGET_VRAM: + ctrl->addressSpace = ADDR_FBMEM; + ctrl->pbTargetAperture = PHYS_NVM; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + ctrl->physicalAddr = nvkm_memory_addr(memory); + ctrl->limit = nvkm_memory_size(memory) - 1; + } + + ctrl->hclass = oclass; + ctrl->channelInstance = inst; + ctrl->valid = ((oclass & 0xff) != 0x7a) ? 
1 : 0; + ctrl->subDeviceId = BIT(0); + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static int +r570_dp_set_indexed_link_rates(struct nvkm_outp *outp) +{ + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + for (int i = 0; i < outp->dp.rates; i++) + ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm) +{ + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + *plink_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + *plink_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + *plink_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + *plink_bw = 0x1e; + break; + default: + *plink_bw = 0x00; + break; + } + + *pmst = ctrl->bIsMultistreamSupported; + *pwm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval) +{ + u32 cmd = set ? 
NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS : + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(display_id); + ctrl->brightness = *pval; + ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pval = ctrl->brightness; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid) +{ + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *displayid = ctrl->displayId; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} +static int +r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayMask = BIT(display_id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret == 0 && (ctrl->displayMask & BIT(display_id))) + ret = 1; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static int +r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask) +{ + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->displayMask; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_disp_get_static_info(struct nvkm_disp *disp) +{ + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +const struct nvkm_rm_api_disp +r570_disp = { + .get_static_info = r570_disp_get_static_info, + .get_supported = r570_disp_get_supported, + .get_connect_state = r570_disp_get_connect_state, + .get_active = r570_disp_get_active, + .bl_ctrl = r570_bl_ctrl, + .dp = { + .get_caps = r570_dp_get_caps, + .set_indexed_link_rates = r570_dp_set_indexed_link_rates, + }, + .chan = { + .set_pushbuf = r570_disp_chan_set_pushbuf, + .dmac_alloc = r570_dmac_alloc, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c new file mode 100644 index 000000000000..2945d5b4e570 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA 
CORPORATION. All rights reserved. + */ +#include <subdev/instmem/priv.h> +#include <subdev/bar.h> +#include <subdev/gsp.h> +#include <subdev/mmu/vmm.h> + +#include "nvrm/fbsr.h" +#include "nvrm/fifo.h" + +static int +r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend) +{ + NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->bDisableActiveChannels = suspend; + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static void +r570_fbsr_resume(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_instmem *imem = device->imem; + struct nvkm_instobj *iobj; + struct nvkm_vmm *vmm; + + /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */ + list_for_each_entry(iobj, &imem->boot, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + device->bar->bar2 = true; + + vmm = nvkm_bar_bar2_vmm(device); + vmm->func->flush(vmm, 0); + + /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + vmm = nvkm_bar_bar1_vmm(device); + vmm->func->flush(vmm, 0); + + /* Resume channel scheduling. */ + r570_fbsr_suspend_channels(device->gsp, false); + + /* Finish cleaning up. */ + r535_fbsr_resume(gsp); +} + +static int +r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) +{ + NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; + struct nvkm_gsp_object memlist; + int ret; + + ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST, + 0, size, sgt, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->hClient = gsp->internal.client.object.handle; + ctrl->hSysMem = memlist.handle; + ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr; + ctrl->bEnteringGcoffState = 1; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + nvkm_gsp_rm_free(&memlist); + return 0; +} + +static int +r570_fbsr_suspend(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_instmem *imem = device->imem; + struct nvkm_instobj *iobj; + u64 size; + int ret; + + /* Stop channel scheduling. */ + r570_fbsr_suspend_channels(gsp, true); + + /* Save BAR2 allocations to system memory. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + } + + list_for_each_entry(iobj, &imem->boot, head) { + ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + + /* Disable BAR2 access. */ + device->bar->bar2 = false; + + /* Allocate system memory to hold RM's VRAM allocations across suspend. */ + size = gsp->fb.heap.size; + size += gsp->fb.rsvd_size; + size += gsp->fb.bios.vga_workspace.size; + nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size); + + ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr); + if (ret) + return ret; + + /* Initialise FBSR on RM. 
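+	 * RM is expected to copy its VRAM allocations into the sysmem
+	 * buffer allocated above once it enters the GC-OFF state
+	 * (bEnteringGcoffState is set in r570_fbsr_init()).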
*/ + ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size); + if (ret) { + nvkm_gsp_sg_free(device, &gsp->sr.fbsr); + return ret; + } + + return 0; +} + +const struct nvkm_rm_api_fbsr +r570_fbsr = { + .suspend = r570_fbsr_suspend, + .resume = r570_fbsr_resume, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c new file mode 100644 index 000000000000..79132805cfcf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include <rm/rm.h> + +#include <subdev/mmu.h> +#include <engine/fifo/priv.h> +#include <engine/fifo/chan.h> +#include <engine/fifo/runl.h> + +#include "nvhw/drf.h" + +#include "nvrm/fifo.h" +#include "nvrm/engine.h" + +#define CHID_PER_USERD 8 + +static int +r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq, + bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length, + struct nvkm_gsp_object *chan) +{ + struct nvkm_gsp *gsp = device->object.client->gsp; + struct nvkm_fifo *fifo = gsp->subdev.device->fifo; + const int userd_p = chid / CHID_PER_USERD; + const int userd_i = chid % CHID_PER_USERD; + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&device->object, handle, + fifo->func->chan.user.oclass, sizeof(*args), chan); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->gpFifoOffset = gpfifo_offset; + args->gpFifoEntries = gpfifo_length / 8; + + args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); + args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq); + if (!priv) + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); + else + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE); + args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE); + + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE); + + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT); + args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + + args->hVASpace = vmm->rm.object.handle; + args->engineType = nv2080_engine_type; + + args->instanceMem.base = inst_addr; + args->instanceMem.size = fifo->func->chan.func->inst->size; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + + args->userdMem.base = userd_addr; + args->userdMem.size = fifo->func->chan.func->userd->size; + 
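+	/* The addressSpace values in these memory descriptors are assumed
+	 * to follow RM's ADDR_* convention (1 = sysmem, 2 = vidmem),
+	 * matching the aperture each buffer is allocated from, with
+	 * cacheAttrib 1 meaning cached. */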
args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + + args->ramfcMem.base = inst_addr; + args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + + args->mthdbufMem.base = mthdbuf_addr; + args->mthdbufMem.size = fifo->rm.mthdbuf_size; + args->mthdbufMem.addressSpace = 1; + args->mthdbufMem.cacheAttrib = 0; + + if (!priv) + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER); + else + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + return nvkm_gsp_rm_alloc_wr(chan, args); +} + +static int +r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d " + "fault_addr:%08x%08x fault_type:%08x\n", + msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType, + msg->scope, msg->partitionAttributionId, + msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType); + + r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid); + return 0; +} + +static int +r570_fifo_ectx_size(struct nvkm_fifo *fifo) +{ + NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn *engn; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO, + sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numConstructedFalcons; i++) { + nvkm_runl_foreach(runl, fifo) { + nvkm_runl_foreach_engn(engn, runl) { + if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) { + engn->rm.size = + ctrl->constructedFalconsTable[i].ctxBufferSize; + break; + } + } + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080) +{ +#define RM_ENGINE_TYPE(RM,NVKM,INST) \ + RM_ENGINE_TYPE_##RM: \ + *ptype = NVKM_ENGINE_##NVKM; \ + *p2080 = NV2080_ENGINE_TYPE_##RM; \ + return INST + + switch (rm) { + case RM_ENGINE_TYPE( GR0, GR, 0); + case RM_ENGINE_TYPE( COPY0, CE, 0); + case RM_ENGINE_TYPE( COPY1, CE, 1); + case RM_ENGINE_TYPE( COPY2, CE, 2); + case RM_ENGINE_TYPE( COPY3, CE, 3); + case RM_ENGINE_TYPE( COPY4, CE, 4); + case RM_ENGINE_TYPE( COPY5, CE, 5); + case RM_ENGINE_TYPE( COPY6, CE, 6); + case RM_ENGINE_TYPE( COPY7, CE, 7); + case RM_ENGINE_TYPE( COPY8, CE, 8); + case RM_ENGINE_TYPE( COPY9, CE, 9); + case RM_ENGINE_TYPE( COPY10, CE, 10); + case RM_ENGINE_TYPE( COPY11, CE, 11); + case RM_ENGINE_TYPE( COPY12, CE, 12); + case RM_ENGINE_TYPE( COPY13, CE, 13); + case RM_ENGINE_TYPE( COPY14, CE, 14); + case RM_ENGINE_TYPE( COPY15, CE, 15); + case RM_ENGINE_TYPE( COPY16, CE, 16); + case RM_ENGINE_TYPE( COPY17, CE, 17); + case RM_ENGINE_TYPE( COPY18, CE, 18); + case RM_ENGINE_TYPE( COPY19, CE, 19); + case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0); + case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1); + case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2); + case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3); + case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4); + case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5); + 
case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6); + case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7); + case RM_ENGINE_TYPE( NVENC0, NVENC, 0); + case RM_ENGINE_TYPE( NVENC1, NVENC, 1); + case RM_ENGINE_TYPE( NVENC2, NVENC, 2); + case RM_ENGINE_TYPE( NVENC3, NVENC, 3); + case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0); + case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1); + case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2); + case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3); + case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4); + case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5); + case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6); + case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7); + case RM_ENGINE_TYPE( SW, SW, 0); + case RM_ENGINE_TYPE( SEC2, SEC2, 0); + case RM_ENGINE_TYPE( OFA0, OFA, 0); + case RM_ENGINE_TYPE( OFA1, OFA, 1); + default: + return -EINVAL; + } +#undef RM_ENGINE_TYPE +} + +const struct nvkm_rm_api_fifo +r570_fifo = { + .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type, + .ectx_size = r570_fifo_ectx_size, + .rsvd_chids = 1, + .rc_triggered = r570_fifo_rc_triggered, + .chan = { + .alloc = r570_chan_alloc, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c new file mode 100644 index 000000000000..b6cced9b8aa1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include <rm/gr.h> + +#include <subdev/mmu.h> +#include <engine/fifo.h> +#include <engine/fifo/chid.h> +#include <engine/gr/priv.h> + +#include "nvrm/gr.h" +#include "nvrm/engine.h" + +int +r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask) +{ + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->gpcId = gpc; + + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pmask = ctrl->tpcMask; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +int +r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask) +{ + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->gpcMask; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown) +{ + NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->bTeardown = teardown; + + return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl); +} + +static void +r570_gr_scrubber_fini(struct r535_gr *gr) +{ + /* Teardown scrubber channel on RM. */ + if (gr->scrubber.enabled) { + WARN_ON(r570_gr_scrubber_ctrl(gr, true)); + gr->scrubber.enabled = false; + } + + /* Free scrubber channel. 
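+	 * and everything allocated with it: the 3D object, the channel
+	 * object, its context buffers and VMAs, and finally the VMM and
+	 * instance memory.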
*/ + nvkm_gsp_rm_free(&gr->scrubber.threed); + nvkm_gsp_rm_free(&gr->scrubber.chan); + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]); + nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]); + } + + nvkm_vmm_unref(&gr->scrubber.vmm); + nvkm_memory_unref(&gr->scrubber.inst); +} + +static int +r570_gr_scrubber_init(struct r535_gr *gr) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; + int ret; + + /* Scrubber channel only required on TU10x. */ + switch (device->chipset) { + case 0x162: + case 0x164: + case 0x166: + break; + default: + return 0; + } + + if (gr->scrubber.chid < 0) { + gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL); + if (gr->scrubber.chid < 0) + return gr->scrubber.chid; + } + + /* Allocate scrubber channel. */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, + 0x2000 + device->fifo->rm.mthdbuf_size, 0, true, + &gr->scrubber.inst); + if (ret) + goto done; + + ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm", + &gr->scrubber.vmm); + if (ret) + goto done; + + ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false); + if (ret) + goto done; + + ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL, + NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid, + nvkm_memory_addr(gr->scrubber.inst), + nvkm_memory_addr(gr->scrubber.inst) + 0x1000, + nvkm_memory_addr(gr->scrubber.inst) + 0x2000, + gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan); + if (ret) + goto done; + + ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem, + gr->scrubber.ctxbuf.vma, &gr->scrubber.chan); + if (ret) + goto done; + + ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ, + rm->gpu->gr.class.threed, 0, &gr->scrubber.threed); + if (ret) + goto done; + + /* Initialise scrubber channel on RM. */ + ret = r570_gr_scrubber_ctrl(gr, false); + if (ret) + goto done; + + gr->scrubber.enabled = true; + +done: + if (ret) + r570_gr_scrubber_fini(gr); + + return ret; +} + +static int +r570_gr_get_ctxbufs_info(struct r535_gr *gr) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) + return PTR_ERR(info); + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) + r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + return 0; +} + +const struct nvkm_rm_api_gr +r570_gr = { + .get_ctxbufs_info = r570_gr_get_ctxbufs_info, + .scrubber.init = r570_gr_scrubber_init, + .scrubber.fini = r570_gr_scrubber_fini, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c new file mode 100644 index 000000000000..9d2fa4e66d59 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include <rm/rm.h> +#include <rm/rpc.h> + +#include <asm-generic/video.h> + +#include "nvrm/gsp.h" +#include "nvrm/rpcfn.h" +#include "nvrm/msgfn.h" + +#include <core/pci.h> +#include <subdev/pci/priv.h> + +static u32 +r570_gsp_sr_data_size(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta = gsp->wpr_meta.data; + + return (meta->frtsOffset + meta->frtsSize) - + (meta->nonWprHeapOffset + meta->nonWprHeapSize); +} + +static void +r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp) +{ + if (gsp->subdev.debug < NV_DBG_DEBUG) { + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL); + } +} + +static bool +r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst) +{ + switch (mc_engine_idx) { + case MC_ENGINE_IDX_GSP: + *ptype = NVKM_SUBDEV_GSP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_DISP: + *ptype = NVKM_ENGINE_DISP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19: + *ptype = NVKM_ENGINE_CE; + *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0; + return true; + case MC_ENGINE_IDX_GR0: + *ptype = NVKM_ENGINE_GR; + *pinst = 0; + return true; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0; + return true; + case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3: + *ptype = NVKM_ENGINE_NVENC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC; + return true; + case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0; + return true; + case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1: + *ptype = NVKM_ENGINE_OFA; + *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0; + return true; + default: + return false; + } +} + +static int +r570_gsp_get_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + u32 gpc_mask; + u32 tpc_mask; + int ret; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + INIT_LIST_HEAD(&gsp->internal.client.events); + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams); + + if (gsp->rm->wpr->offset_set_by_acr) { + GspFwWprMeta *meta = gsp->wpr_meta.data; + + meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset; + meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset; + } + + nvkm_gsp_rpc_done(gsp, rpc); + + ret = r570_gr_gpc_mask(gsp, &gpc_mask); + if (ret) + return ret; + + for (int gpc = 0; gpc < 32; gpc++) { + if (gpc_mask & BIT(gpc)) { + ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask); + if (ret) + return ret; + + gsp->gr.tpcs += hweight32(tpc_mask); + gsp->gr.gpcs++; + } + } + + return 0; +} + +static void +r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) +{ +#if 
defined(CONFIG_ACPI) && defined(CONFIG_X86) + acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); + + if (!handle) + return; + + acpi->bValid = 1; + + r535_gsp_acpi_dod(handle, &acpi->dodMethodData); + r535_gsp_acpi_jt(handle, &acpi->jtMethodData); + r535_gsp_acpi_caps(handle, &acpi->capsMethodData); +#endif +} + +static int +r570_gsp_set_system_info(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev; + GspSystemInfo *info; + + if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) + return -ENOSYS; + + info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI); + info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB); + info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST); + info->nvDomainBusDeviceFunc = pci_dev_id(pdev); + info->maxUserVa = TASK_SIZE; + info->pciConfigMirrorBase = device->pci->func->cfg.addr; + info->pciConfigMirrorSize = device->pci->func->cfg.size; + info->PCIDeviceID = (pdev->device << 16) | pdev->vendor; + info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor; + info->PCIRevisionID = pdev->revision; + r570_gsp_acpi_info(gsp, &info->acpiMethodData); + info->bIsPrimary = video_is_primary_device(device->dev); + info->bPreserveVideoMemoryAllocations = false; + + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); +} + +static void +r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume) +{ + GSP_ARGUMENTS_CACHED *args; + + args = gsp->rmargs.data; + args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; + args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; + args->messageQueueInitArguments.cmdQueueOffset = + (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; + args->messageQueueInitArguments.statQueueOffset = + (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; + + if (!resume) { + args->srInitArguments.oldLevel = 0; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 0; + } else { + args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 1; + } + + args->bDmemStack = 1; +} + +const struct nvkm_rm_api_gsp +r570_gsp = { + .set_rmargs = r570_gsp_set_rmargs, + .set_system_info = r570_gsp_set_system_info, + .get_static_info = r570_gsp_get_static_info, + .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx, + .drop_post_nocat_record = r570_gsp_drop_post_nocat_record, + .sr_data_size = r570_gsp_sr_data_size, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h new file mode 100644 index 000000000000..e8714e0abc37 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_CLIENT_H__ +#define __NVRM_CLIENT_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV_PROC_NAME_MAX_LENGTH 100U + +typedef struct NV0000_ALLOC_PARAMETERS { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; + NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8); +} NV0000_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h new file mode 100644 index 000000000000..06e972835d77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_DISP_H__ +#define __NVRM_DISP_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { + NvU32 feHwSysCap; + NvU32 windowPresentMask; + NvBool bFbRemapperEnabled; + NvU32 numHeads; + NvU32 i2cPort; + NvU32 internalDispActiveMask; + NvU32 embeddedDisplayPortMask; + NvBool bExternalMuxSupported; + NvBool bInternalMuxSupported; + NvU32 numDispChannels; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; + +#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayMask; + NvU32 displayMaskDDC; +} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS; + +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS { + NvBool bDscSupported; + NvU32 encoderColorFormatMask; + NvU32 lineBufferSizeKB; + NvU32 rateBufferSizeKB; + NvU32 bitsPerPixelPrecision; + NvU32 maxNumHztSlices; + NvU32 lineBufferBitDepth; +} NV0073_CTRL_CMD_DSC_CAP_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NvU32 maxLinkRate; + NvU32 dpVersionsSupported; + NvU32 UHBRSupportedByGpu; + NvU32 minPClkForCompressed; + NvBool bIsMultistreamSupported; + NvBool bIsSCEnabled; + NvBool bHasIncreasedWatermarkLimits; + NvBool bIsPC2Disabled; + NvBool 
isSingleHeadMSTSupported; + NvBool bFECSupported; + NvBool bIsTrainPhyRepeater; + NvBool bOverrideLinkBw; + NvBool bUseRgFlushSequence; + NvBool bSupportDPDownSpread; + NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC; +} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ +#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) + +#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 displayMask; + NvU32 retryTimeMs; +} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS; + +#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) 
| NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 UHBRSupportedByDfp; +} NV0073_CTRL_DFP_GET_INFO_PARAMS; + +#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) +#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 +#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 +#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13 +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LINK 21:20 +#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL 
(0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22 +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23 +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25 +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U) + +#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 flags; + NvU32 displayId; +} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 brightness; + NvBool bUncalibrated; + NvU8 brightnessType; +} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS; + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + +#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + 
NvU32 data; + NvU32 err; + NvU32 retryTimeMs; + NvU32 eightLaneDpcdBaseAddr; +} NV0073_CTRL_DP_CTRL_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 dpLink; + + NvBool bEnableOverride; + NvBool bMST; + NvU32 singleHeadMultistreamMode; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 colorFormat; + NvBool bEnableTwoHeadOneOr; + + struct { + NvU32 slotStart; + NvU32 slotEnd; + NvU32 PBN; + NvU32 Timeslice; + NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT + NvU32 singleHeadMSTPipeline; + NvBool bEnableAudioOverRightPanel; + } MST; + + struct { + NvBool bEnhancedFraming; + NvU32 tuSize; + NvU32 waterMark; + NvBool bEnableAudioOverRightPanel; + } SST; +} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mute; +} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS { + NvU32 addressSpace; + NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvU32 cacheSnoop; + NvU32 hclass; + NvU32 channelInstance; + NvBool valid; + NvU32 pbTargetAperture; + NvU32 channelPBSize; + NvU32 subDeviceId; +} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS; + +#define ADDR_SYSMEM (1) + +#define ADDR_FBMEM 2 // Frame buffer memory space + +typedef enum +{ + PB_SIZE_4KB = 0, + PB_SIZE_8KB, + PB_SIZE_16KB, + PB_SIZE_32KB, + PB_SIZE_64KB +} ChannelPBSize; + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // Note that core channel has only one instance + // while all others have two (one per head). + NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications + NvU32 offset; // Initial offset for put/get, usually zero. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs + + NvU32 flags; + ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum) +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001 + + NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. 
SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method) +} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS; + +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1 +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2 +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3 + +typedef enum +{ + IOVA, + PHYS_NVM, + PHYS_PCI, + PHYS_PCI_COHERENT +} PBTARGETAPERTURE; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h new file mode 100644 index 000000000000..7997050a4f29 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_ENGINE_H__ +#define __NVRM_ENGINE_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define MC_ENGINE_IDX_NULL 0 // This must be 0 +#define MC_ENGINE_IDX_TMR 1 +#define MC_ENGINE_IDX_DISP 2 +#define MC_ENGINE_IDX_FB 3 +#define MC_ENGINE_IDX_FIFO 4 +#define MC_ENGINE_IDX_VIDEO 5 +#define MC_ENGINE_IDX_MD 6 +#define MC_ENGINE_IDX_BUS 7 +#define MC_ENGINE_IDX_PMGR 8 +#define MC_ENGINE_IDX_VP2 9 +#define MC_ENGINE_IDX_CIPHER 10 +#define MC_ENGINE_IDX_BIF 11 +#define MC_ENGINE_IDX_PPP 12 +#define MC_ENGINE_IDX_PRIVRING 13 +#define MC_ENGINE_IDX_PMU 14 +#define MC_ENGINE_IDX_CE0 15 +#define MC_ENGINE_IDX_CE1 16 +#define MC_ENGINE_IDX_CE2 17 +#define MC_ENGINE_IDX_CE3 18 +#define MC_ENGINE_IDX_CE4 19 +#define MC_ENGINE_IDX_CE5 20 +#define MC_ENGINE_IDX_CE6 21 +#define MC_ENGINE_IDX_CE7 22 +#define MC_ENGINE_IDX_CE8 23 +#define MC_ENGINE_IDX_CE9 24 +#define MC_ENGINE_IDX_CE10 25 +#define MC_ENGINE_IDX_CE11 26 +#define MC_ENGINE_IDX_CE12 27 +#define MC_ENGINE_IDX_CE13 28 +#define MC_ENGINE_IDX_CE14 29 +#define MC_ENGINE_IDX_CE15 30 +#define MC_ENGINE_IDX_CE16 31 +#define MC_ENGINE_IDX_CE17 32 +#define MC_ENGINE_IDX_CE18 33 +#define MC_ENGINE_IDX_CE19 34 +#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19 +#define MC_ENGINE_IDX_VIC 35 +#define MC_ENGINE_IDX_ISOHUB 36 +#define MC_ENGINE_IDX_VGPU 37 +#define MC_ENGINE_IDX_NVENC 38 +#define MC_ENGINE_IDX_NVENC1 39 +#define MC_ENGINE_IDX_NVENC2 40 +#define MC_ENGINE_IDX_NVENC3 41 +#define MC_ENGINE_IDX_C2C 42 +#define MC_ENGINE_IDX_LTC 43 +#define MC_ENGINE_IDX_FBHUB 44 +#define MC_ENGINE_IDX_HDACODEC 45 +#define MC_ENGINE_IDX_GMMU 46 +#define MC_ENGINE_IDX_SEC2 47 +#define MC_ENGINE_IDX_FSP 48 +#define MC_ENGINE_IDX_NVLINK 49 +#define MC_ENGINE_IDX_GSP 50 +#define MC_ENGINE_IDX_NVJPG 51 +#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG +#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG +#define MC_ENGINE_IDX_NVJPEG1 52 +#define MC_ENGINE_IDX_NVJPEG2 53 +#define MC_ENGINE_IDX_NVJPEG3 54 +#define MC_ENGINE_IDX_NVJPEG4 55 +#define MC_ENGINE_IDX_NVJPEG5 56 +#define MC_ENGINE_IDX_NVJPEG6 57 +#define MC_ENGINE_IDX_NVJPEG7 58 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59 +#define MC_ENGINE_IDX_ACCESS_CNTR 60 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63 +#define MC_ENGINE_IDX_INFO_FAULT 64 +#define MC_ENGINE_IDX_BSP 65 +#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP +#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC +#define MC_ENGINE_IDX_NVDEC1 66 +#define MC_ENGINE_IDX_NVDEC2 67 +#define MC_ENGINE_IDX_NVDEC3 68 +#define MC_ENGINE_IDX_NVDEC4 
69 +#define MC_ENGINE_IDX_NVDEC5 70 +#define MC_ENGINE_IDX_NVDEC6 71 +#define MC_ENGINE_IDX_NVDEC7 72 +#define MC_ENGINE_IDX_CPU_DOORBELL 73 +#define MC_ENGINE_IDX_PRIV_DOORBELL 74 +#define MC_ENGINE_IDX_MMU_ECC_ERROR 75 +#define MC_ENGINE_IDX_BLG 76 +#define MC_ENGINE_IDX_PERFMON 77 +#define MC_ENGINE_IDX_BUF_RESET 78 +#define MC_ENGINE_IDX_XBAR 79 +#define MC_ENGINE_IDX_ZPW 80 +#define MC_ENGINE_IDX_OFA0 81 +#define MC_ENGINE_IDX_OFA1 82 +#define MC_ENGINE_IDX_TEGRA 83 +#define MC_ENGINE_IDX_GR 84 +#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR +#define MC_ENGINE_IDX_GR1 85 +#define MC_ENGINE_IDX_GR2 86 +#define MC_ENGINE_IDX_GR3 87 +#define MC_ENGINE_IDX_GR4 88 +#define MC_ENGINE_IDX_GR5 89 +#define MC_ENGINE_IDX_GR6 90 +#define MC_ENGINE_IDX_GR7 91 +#define MC_ENGINE_IDX_ESCHED 92 +#define MC_ENGINE_IDX_ESCHED__SIZE 64 +#define MC_ENGINE_IDX_GR_FECS_LOG 156 +#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG +#define MC_ENGINE_IDX_GR1_FECS_LOG 157 +#define MC_ENGINE_IDX_GR2_FECS_LOG 158 +#define MC_ENGINE_IDX_GR3_FECS_LOG 159 +#define MC_ENGINE_IDX_GR4_FECS_LOG 160 +#define MC_ENGINE_IDX_GR5_FECS_LOG 161 +#define MC_ENGINE_IDX_GR6_FECS_LOG 162 +#define MC_ENGINE_IDX_GR7_FECS_LOG 163 +#define MC_ENGINE_IDX_TMR_SWRL 164 +#define MC_ENGINE_IDX_DISP_GSP 165 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167 +#define MC_ENGINE_IDX_PXUC 168 +#define MC_ENGINE_IDX_SYSLTC 169 +#define MC_ENGINE_IDX_LRCC 170 +#define MC_ENGINE_IDX_GSPLITE 171 +#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE +#define MC_ENGINE_IDX_GSPLITE1 172 +#define MC_ENGINE_IDX_GSPLITE2 173 +#define MC_ENGINE_IDX_GSPLITE3 174 +#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3 +#define MC_ENGINE_IDX_DPAUX 175 +#define MC_ENGINE_IDX_DISP_LOW 176 +#define MC_ENGINE_IDX_MAX 177 + +typedef enum +{ + RM_ENGINE_TYPE_NULL = (0x00000000), + RM_ENGINE_TYPE_GR0 = (0x00000001), + RM_ENGINE_TYPE_GR1 = (0x00000002), + RM_ENGINE_TYPE_GR2 = (0x00000003), + RM_ENGINE_TYPE_GR3 = (0x00000004), + RM_ENGINE_TYPE_GR4 = (0x00000005), + RM_ENGINE_TYPE_GR5 = (0x00000006), + RM_ENGINE_TYPE_GR6 = (0x00000007), + RM_ENGINE_TYPE_GR7 = (0x00000008), + RM_ENGINE_TYPE_COPY0 = (0x00000009), + RM_ENGINE_TYPE_COPY1 = (0x0000000a), + RM_ENGINE_TYPE_COPY2 = (0x0000000b), + RM_ENGINE_TYPE_COPY3 = (0x0000000c), + RM_ENGINE_TYPE_COPY4 = (0x0000000d), + RM_ENGINE_TYPE_COPY5 = (0x0000000e), + RM_ENGINE_TYPE_COPY6 = (0x0000000f), + RM_ENGINE_TYPE_COPY7 = (0x00000010), + RM_ENGINE_TYPE_COPY8 = (0x00000011), + RM_ENGINE_TYPE_COPY9 = (0x00000012), + RM_ENGINE_TYPE_COPY10 = (0x00000013), + RM_ENGINE_TYPE_COPY11 = (0x00000014), + RM_ENGINE_TYPE_COPY12 = (0x00000015), + RM_ENGINE_TYPE_COPY13 = (0x00000016), + RM_ENGINE_TYPE_COPY14 = (0x00000017), + RM_ENGINE_TYPE_COPY15 = (0x00000018), + RM_ENGINE_TYPE_COPY16 = (0x00000019), + RM_ENGINE_TYPE_COPY17 = (0x0000001a), + RM_ENGINE_TYPE_COPY18 = (0x0000001b), + RM_ENGINE_TYPE_COPY19 = (0x0000001c), + RM_ENGINE_TYPE_NVDEC0 = (0x0000001d), + RM_ENGINE_TYPE_NVDEC1 = (0x0000001e), + RM_ENGINE_TYPE_NVDEC2 = (0x0000001f), + RM_ENGINE_TYPE_NVDEC3 = (0x00000020), + RM_ENGINE_TYPE_NVDEC4 = (0x00000021), + RM_ENGINE_TYPE_NVDEC5 = (0x00000022), + RM_ENGINE_TYPE_NVDEC6 = (0x00000023), + RM_ENGINE_TYPE_NVDEC7 = (0x00000024), + RM_ENGINE_TYPE_NVENC0 = (0x00000025), + RM_ENGINE_TYPE_NVENC1 = (0x00000026), + RM_ENGINE_TYPE_NVENC2 = (0x00000027), + // Bug 4175886 - Use this new value for all chips once GB20X is released + RM_ENGINE_TYPE_NVENC3 = (0x00000028), + RM_ENGINE_TYPE_VP 
= (0x00000029), + RM_ENGINE_TYPE_ME = (0x0000002a), + RM_ENGINE_TYPE_PPP = (0x0000002b), + RM_ENGINE_TYPE_MPEG = (0x0000002c), + RM_ENGINE_TYPE_SW = (0x0000002d), + RM_ENGINE_TYPE_TSEC = (0x0000002e), + RM_ENGINE_TYPE_VIC = (0x0000002f), + RM_ENGINE_TYPE_MP = (0x00000030), + RM_ENGINE_TYPE_SEC2 = (0x00000031), + RM_ENGINE_TYPE_HOST = (0x00000032), + RM_ENGINE_TYPE_DPU = (0x00000033), + RM_ENGINE_TYPE_PMU = (0x00000034), + RM_ENGINE_TYPE_FBFLCN = (0x00000035), + RM_ENGINE_TYPE_NVJPEG0 = (0x00000036), + RM_ENGINE_TYPE_NVJPEG1 = (0x00000037), + RM_ENGINE_TYPE_NVJPEG2 = (0x00000038), + RM_ENGINE_TYPE_NVJPEG3 = (0x00000039), + RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a), + RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b), + RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c), + RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d), + RM_ENGINE_TYPE_OFA0 = (0x0000003e), + RM_ENGINE_TYPE_OFA1 = (0x0000003f), + RM_ENGINE_TYPE_RESERVED40 = (0x00000040), + RM_ENGINE_TYPE_RESERVED41 = (0x00000041), + RM_ENGINE_TYPE_RESERVED42 = (0x00000042), + RM_ENGINE_TYPE_RESERVED43 = (0x00000043), + RM_ENGINE_TYPE_RESERVED44 = (0x00000044), + RM_ENGINE_TYPE_RESERVED45 = (0x00000045), + RM_ENGINE_TYPE_RESERVED46 = (0x00000046), + RM_ENGINE_TYPE_RESERVED47 = (0x00000047), + RM_ENGINE_TYPE_RESERVED48 = (0x00000048), + RM_ENGINE_TYPE_RESERVED49 = (0x00000049), + RM_ENGINE_TYPE_RESERVED4a = (0x0000004a), + RM_ENGINE_TYPE_RESERVED4b = (0x0000004b), + RM_ENGINE_TYPE_RESERVED4c = (0x0000004c), + RM_ENGINE_TYPE_RESERVED4d = (0x0000004d), + RM_ENGINE_TYPE_RESERVED4e = (0x0000004e), + RM_ENGINE_TYPE_RESERVED4f = (0x0000004f), + RM_ENGINE_TYPE_RESERVED50 = (0x00000050), + RM_ENGINE_TYPE_RESERVED51 = (0x00000051), + RM_ENGINE_TYPE_RESERVED52 = (0x00000052), + RM_ENGINE_TYPE_RESERVED53 = (0x00000053), + RM_ENGINE_TYPE_LAST = (0x00000054), +} RM_ENGINE_TYPE; + +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 (0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018) +#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019) +#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) 
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c) +#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d) +#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e) +#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f) +#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030) +#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031) +#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA +#define NV2080_ENGINE_TYPE_COPY10 (0x00000034) +#define NV2080_ENGINE_TYPE_COPY11 (0x00000035) +#define NV2080_ENGINE_TYPE_COPY12 (0x00000036) +#define NV2080_ENGINE_TYPE_COPY13 (0x00000037) +#define NV2080_ENGINE_TYPE_COPY14 (0x00000038) +#define NV2080_ENGINE_TYPE_COPY15 (0x00000039) +#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a) +#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b) +#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c) +#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d) +#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e) +#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053) +#define NV2080_ENGINE_TYPE_LAST (0x00000054) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h new file mode 100644 index 000000000000..8af432375f7a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_FBSR_H__ +#define __NVRM_FBSR_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS { + NvHandle hClient; + NvHandle hSysMem; + NvBool bEnteringGcoffState; + NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8); +} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h new file mode 100644 index 000000000000..2b002ca64e0f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_FIFO_H__ +#define __NVRM_FIFO_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV_MAX_SUBDEVICES 8 + +typedef struct NV_MEMORY_DESC_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U + +#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U + +typedef struct NV_CHANNEL_ALLOC_PARAMS { + + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + + NvU32 flags; + + + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + + // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; + + // offset to beginning of UserD within hUserdMemory[x] + NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); + + // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 engineType; + // Channel identifier that is unique for the duration of an RM session + NvU32 cid; + // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvU32 subDeviceId; + NvHandle hObjectEccError; // ECC error context DMA + + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); + + NvHandle hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved + + // IV used for CPU-side encryption / GPU-side decryption. + NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // IV used for CPU-side decryption / GPU-side encryption. + NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // Nonce used for CPU-side signing / GPU-side signature verification. 
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved + NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature +} NV_CHANNEL_ALLOC_PARAMS; + +typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 +#define NVOS04_FLAGS_CC_SECURE 2:2 +#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 +#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define 
NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 + +typedef enum { + /*! + * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by + * kernel CPU-RM clients. + */ + ERROR_NOTIFIER_TYPE_UNKNOWN = 0, + /*! @brief Error notifier is explicitly not set. + * + * The corresponding hErrorContext or hEccErrorContext must be + * NV01_NULL_OBJECT. + */ + ERROR_NOTIFIER_TYPE_NONE, + /*! @brief Error notifier is a ContextDma */ + ERROR_NOTIFIER_TYPE_CTXDMA, + /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ + ERROR_NOTIFIER_TYPE_MEMORY +} ErrorNotifierType; + +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1 + +typedef struct rpc_rc_triggered_v17_02 +{ + NvU32 nv2080EngineType; + NvU32 chid; + NvU32 gfid; + NvU32 exceptLevel; + NvU32 exceptType; + NvU32 scope; + NvU16 partitionAttributionId; + NvU32 mmuFaultAddrLo; + NvU32 mmuFaultAddrHi; + NvU32 mmuFaultType; + NvBool bCallbackNeeded; + NvU32 rcJournalBufferSize; + NvU8 rcJournalBuffer[]; +} rpc_rc_triggered_v17_02; + +#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40 + +typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO; + +#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +typedef struct 
NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS { + NvBool bDisableActiveChannels; +} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */ +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h new file mode 100644 index 000000000000..feed1dabd9d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_GR_H__ +#define __NVRM_GR_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV 
(0x0000000f) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a) + +#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { + NvU32 gpcMask; +} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; + +#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042 +#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3) +#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4) + +typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS { + NvBool bTeardown; +} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */ +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h new file mode 100644 index 000000000000..b6075021e74f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h @@ -0,0 +1,634 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_GSP_H__ +#define __NVRM_GSP_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 + +#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL) + +typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + NvU32 index; + NvU32 flags; + NvU32 length; + NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; +} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; + +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + NvU32 BoardID; + char chipSKU[9]; + char chipSKUMod[5]; + NvU32 skuConfigVersion; + char project[5]; + char projectSKU[5]; + char CDP[6]; + char projectSKUMod[2]; + NvU32 businessCycle; +} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; + +#define MAX_GPC_COUNT 32 + +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + NvU32 totalVFs; + NvU32 firstVfOffset; + NvU32 vfFeatureMask; + NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); + NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; + NvBool bSriovEnabled; + NvBool bSriovHeavyEnabled; + NvBool bEmulateVFBar0TlbInvalidationRegister; + NvBool bClientRmAllocatedCtxBuffer; + NvBool bNonPowerOf2ChannelCountSupported; + NvBool bVfResizableBAR1Supported; +} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; + +#include "engine.h" + +#define NVGPU_ENGINE_CAPS_MASK_BITS 32 + +#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +#define MAX_GROUP_COUNT 2 + +typedef struct +{ + NvU32 ecidLow; + NvU32 ecidHigh; + NvU32 ecidExtended; +} EcidManufacturingInfo; + +typedef struct +{ + NvU64 nonWprHeapOffset; + NvU64 frtsOffset; +} FW_WPR_LAYOUT_OFFSET; + +typedef struct GspStaticConfigInfo_t +{ + NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; + + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; + NvU32 sriovMaxGfid; + + NvU32 
engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]; + + NvBool poisonFuseEnabled; + + NvU64 fb_length; + NvU64 fbio_mask; + NvU32 fb_bus_width; + NvU32 fb_ram_type; + NvU64 fbp_mask; + NvU32 l2_cache_size; + + NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvBool bGpuInternalSku; + NvBool bIsQuadroGeneric; + NvBool bIsQuadroAd; + NvBool bIsNvidiaNvs; + NvBool bIsVgx; + NvBool bGeforceSmb; + NvBool bIsTitan; + NvBool bIsTesla; + NvBool bIsMobile; + NvBool bIsGc6Rtd3Allowed; + NvBool bIsGc8Rtd3Allowed; + NvBool bIsGcOffRtd3Allowed; + NvBool bIsGcoffLegacyAllowed; + NvBool bIsMigSupported; + + /* "Total Board Power" refers to the power requirement of the GPU + * while in the GC6 state. The majority of this power is used + * to keep the VRAM active to preserve its contents. + * Some energy may be consumed by always-on components on the GPU chip. + * This power is provided by the 3.3V voltage rail. + */ + NvU16 RTD3GC6TotalBoardPower; + + /* PERST# (i.e. PCI Express Reset) is a sideband signal + * generated by the PCIe host to indicate to the PCIe devices + * that the power rails and the reference clock are stable. + * The endpoint device typically uses this signal as a global reset. + */ + NvU16 RTD3GC6PerstDelay; + + NvU64 bar1PdeBase; + NvU64 bar2PdeBase; + + NvBool bVbiosValid; + NvU32 vbiosSubVendor; + NvU32 vbiosSubDevice; + + NvBool bPageRetirementSupported; + + NvBool bSplitVasBetweenServerClientRm; + + NvBool bClRootportNeedsNosnoopWAR; + + VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; + VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; + NvU64 displaylessMaxPixels; + + // Client handle for internal RMAPI control. + NvHandle hInternalClient; + + // Device handle for internal RMAPI control. + NvHandle hInternalDevice; + + // Subdevice handle for internal RMAPI control. 
+ NvHandle hInternalSubdevice; + + NvBool bSelfHostedMode; + NvBool bAtsSupported; + + NvBool bIsGpuUefi; + NvBool bIsEfiInit; + + EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT]; + + FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset; +} GspStaticConfigInfo; + +typedef struct +{ + NvU16 deviceID; // deviceID + NvU16 vendorID; // vendorID + NvU16 subdeviceID; // subsystem deviceID + NvU16 subvendorID; // subsystem vendorID + NvU8 revisionID; // revision ID +} BUSINFO; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct DOD_METHOD_DATA +{ + NV_STATUS status; + NvU32 acpiIdListLen; + NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} DOD_METHOD_DATA; + +typedef struct JT_METHOD_DATA +{ + NV_STATUS status; + NvU32 jtCaps; + NvU16 jtRevId; + NvBool bSBIOSCaps; +} JT_METHOD_DATA; + +typedef struct MUX_METHOD_DATA_ELEMENT +{ + NvU32 acpiId; + NvU32 mode; + NV_STATUS status; +} MUX_METHOD_DATA_ELEMENT; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct MUX_METHOD_DATA +{ + NvU32 tableLen; + MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} MUX_METHOD_DATA; + +typedef struct CAPS_METHOD_DATA +{ + NV_STATUS status; + NvU32 optimusCaps; +} CAPS_METHOD_DATA; + +typedef struct ACPI_METHOD_DATA +{ + NvBool bValid; + DOD_METHOD_DATA dodMethodData; + JT_METHOD_DATA jtMethodData; + MUX_METHOD_DATA muxMethodData; + CAPS_METHOD_DATA capsMethodData; +} ACPI_METHOD_DATA; + +typedef struct GSP_VF_INFO +{ + NvU32 totalVFs; + NvU32 firstVFOffset; + NvU64 FirstVFBar0Address; + NvU64 FirstVFBar1Address; + NvU64 FirstVFBar2Address; + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; +} GSP_VF_INFO; + +typedef struct +{ + // Link capabilities + NvU32 linkCap; +} GSP_PCIE_CONFIG_REG; + +typedef struct GspSystemInfo +{ + NvU64 gpuPhysAddr; + NvU64 gpuPhysFbAddr; + NvU64 gpuPhysInstAddr; + NvU64 gpuPhysIoAddr; + NvU64 nvDomainBusDeviceFunc; + NvU64 simAccessBufPhysAddr; + NvU64 notifyOpSharedSurfacePhysAddr; + NvU64 pcieAtomicsOpMask; + NvU64 consoleMemSize; + NvU64 maxUserVa; + NvU32 pciConfigMirrorBase; + NvU32 pciConfigMirrorSize; + NvU32 PCIDeviceID; + NvU32 PCISubDeviceID; + NvU32 PCIRevisionID; + NvU32 pcieAtomicsCplDeviceCapMask; + NvU8 oorArch; + NvU64 clPdbProperties; + NvU32 Chipset; + NvBool bGpuBehindBridge; + NvBool bFlrSupported; + NvBool b64bBar0Supported; + NvBool bMnocAvailable; + NvU32 chipsetL1ssEnable; + NvBool bUpstreamL0sUnsupported; + NvBool bUpstreamL1Unsupported; + NvBool bUpstreamL1PorSupported; + NvBool bUpstreamL1PorMobileOnly; + NvBool bSystemHasMux; + NvU8 upstreamAddressValid; + BUSINFO FHBBusInfo; + BUSINFO chipsetIDInfo; + ACPI_METHOD_DATA acpiMethodData; + NvU32 hypervisorType; + NvBool bIsPassthru; + NvU64 sysTimerOffsetNs; + GSP_VF_INFO gspVFInfo; + NvBool bIsPrimary; + NvBool isGridBuild; + GSP_PCIE_CONFIG_REG pcieConfigReg; + NvU32 gridBuildCsp; + NvBool bPreserveVideoMemoryAllocations; + NvBool bTdrEventSupported; + NvBool bFeatureStretchVblankCapable; + NvBool bEnableDynamicGranularityPageArrays; + NvBool bClockBoostSupported; + NvBool bRouteDispIntrsToCPU; + NvU64 hostPageSize; +} GspSystemInfo; + +typedef struct rpc_os_error_log_v17_00 +{ + NvU32 exceptType; + NvU32 runlistId; + NvU32 chid; + char errString[0x100]; + NvU32 preemptiveRemovalPreviousXid; +} rpc_os_error_log_v17_00; + 
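The `HI:LO` pairs that appear throughout these RM headers (for example `NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8` in the channel-allocation flags above) are bit-range notation: the two numbers name the high and low bit of a field inside a 32-bit word. The driver consumes them through its own field-access macros; the sketch below is only an illustration of the encoding, and the `field_*` helpers are hypothetical names that exist neither in nouveau nor in the RM headers.

/*
 * Minimal, self-contained sketch of packing/unpacking an RM-style HI:LO
 * bitfield with plain shifts and masks. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* Mask covering bits hi..lo inclusive; the ternary avoids 1u << 32. */
static uint32_t field_mask(unsigned int hi, unsigned int lo)
{
	return (hi - lo == 31 ? 0xffffffffu : (1u << (hi - lo + 1)) - 1) << lo;
}

/* Replace the hi:lo field of word with val. */
static uint32_t field_set(uint32_t word, unsigned int hi, unsigned int lo, uint32_t val)
{
	return (word & ~field_mask(hi, lo)) | ((val << lo) & field_mask(hi, lo));
}

/* Extract the hi:lo field of word. */
static uint32_t field_get(uint32_t word, unsigned int hi, unsigned int lo)
{
	return (word & field_mask(hi, lo)) >> lo;
}

int main(void)
{
	uint32_t flags = 0;

	flags = field_set(flags, 5, 5, 1);  /* NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 = _TRUE */
	flags = field_set(flags, 10, 8, 3); /* NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 = 3 */

	printf("flags=0x%08x privileged=%u userd=%u\n",
	       flags, field_get(flags, 5, 5), field_get(flags, 10, 8));
	return 0; /* prints flags=0x00000320 privileged=1 userd=3 */
}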
+typedef struct +{ + // Magic + // BL to use for verification (i.e. Booter locked it in WPR2) + NvU64 magic; // = 0xdc3aae21371a60b3; + + // Revision number of Booter-BL-Sequencer handoff interface + // Bumped up when we change this interface so it is not backward compatible. + // Bumped up when we revoke GSP-RM ucode + NvU64 revision; // = 1; + + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + + NvU64 sysmemAddrOfRadix3Elf; + NvU64 sizeOfRadix3Elf; + + NvU64 sysmemAddrOfBootloader; + NvU64 sizeOfBootloader; + + // Offsets inside bootloader image needed by Booter + NvU64 bootloaderCodeOffset; + NvU64 bootloaderDataOffset; + NvU64 bootloaderManifestOffset; + + union + { + // Used only at initial boot + struct + { + NvU64 sysmemAddrOfSignature; + NvU64 sizeOfSignature; + }; + + // + // Used at suspend/resume to read GspFwHeapFreeList + // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart) + // + struct + { + NvU32 gspFwHeapFreeListWprOffset; + NvU32 unused0; + NvU64 unused1; + }; + }; + + // ---- Members describing FB layout -------------------------------- + NvU64 gspFwRsvdStart; + + NvU64 nonWprHeapOffset; + NvU64 nonWprHeapSize; + + NvU64 gspFwWprStart; + + // GSP-RM to use to setup heap. + NvU64 gspFwHeapOffset; + NvU64 gspFwHeapSize; + + // BL to use to find ELF for jump + NvU64 gspFwOffset; + // Size is sizeOfRadix3Elf above. + + NvU64 bootBinOffset; + // Size is sizeOfBootloader above. + + NvU64 frtsOffset; + NvU64 frtsSize; + + NvU64 gspFwWprEnd; + + // GSP-RM to use for fbRegionInfo? + NvU64 fbSize; + + // ---- Other members ----------------------------------------------- + + // GSP-RM to use for fbRegionInfo? + NvU64 vgaWorkspaceOffset; + NvU64 vgaWorkspaceSize; + + // Boot count. Used to determine whether to load the firmware image. + NvU64 bootCount; + + // This union is organized the way it is to start at an 8-byte boundary and achieve natural + // packing of the internal struct fields. + union + { + struct + { + // TODO: the partitionRpc* fields below do not really belong in this + // structure. The values are patched in by the partition bootstrapper + // when GSP-RM is booted in a partition, and this structure was a + // convenient place for the bootstrapper to access them. These should + // be moved to a different comm. mechanism between the bootstrapper + // and the GSP-RM tasks. + + // Shared partition RPC memory (physical address) + NvU64 partitionRpcAddr; + + // Offsets relative to partitionRpcAddr + NvU16 partitionRpcRequestOffset; + NvU16 partitionRpcReplyOffset; + + // Code section and dataSection offset and size. + NvU32 elfCodeOffset; + NvU32 elfDataOffset; + NvU32 elfCodeSize; + NvU32 elfDataSize; + + // Used during GSP-RM resume to check for revocation + NvU32 lsUcodeVersion; + }; + + struct + { + // Pad for the partitionRpc* fields, plus 4 bytes + NvU32 partitionRpcPadding[4]; + + // CrashCat (contiguous) buffer size/location - occupies same bytes as the + // elf(Code|Data)(Offset|Size) fields above. + // TODO: move to GSP_FMC_INIT_PARAMS + NvU64 sysmemAddrOfCrashReportQueue; + NvU32 sizeOfCrashReportQueue; + + // Pad for the lsUcodeVersion field + NvU32 lsUcodeVersionPadding[1]; + }; + }; + + // Number of VF partitions allocating sub-heaps from the WPR heap + // Used during boot to ensure the heap is adequately sized + NvU8 gspFwHeapVfPartitionCount; + + // Flags to help decide GSP-FW flow. + NvU8 flags; + + // Pad structure to exactly 256 bytes. 
Can replace padding with additional + // fields without incrementing revision. Padding initialized to 0. + NvU8 padding[2]; + + // + // Starts at gspFwWprEnd+frtsSize b/c FRTS is positioned + // to end where this allocation starts (when RM requests FSP to create + // FRTS). + // + NvU32 pmuReservedSize; + + // BL to use for verification (i.e. Booter says OK to boot) + NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified +} GspFwWprMeta; + +#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL + +#define GSP_FW_WPR_META_REVISION 1 + +typedef struct { + NvU64 sharedMemPhysAddr; + NvU32 pageTableEntryCount; + NvLength cmdQueueOffset; + NvLength statQueueOffset; +} MESSAGE_QUEUE_INIT_ARGUMENTS; + +typedef struct { + NvU32 oldLevel; + NvU32 flags; + NvBool bInPMTransition; +} GSP_SR_INIT_ARGUMENTS; + +typedef struct +{ + MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; + GSP_SR_INIT_ARGUMENTS srInitArguments; + NvU32 gpuInstance; + NvBool bDmemStack; + + struct + { + NvU64 pa; + NvU64 size; + } profilerArgs; +} GSP_ARGUMENTS_CACHED; + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U) + +typedef struct +{ + // Magic for verification by secure ucode + NvU64 magic; // = GSP_FW_SR_META_MAGIC; + + // + // Revision number + // Bumped up when we change this interface so it is not backward compatible. + // + NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION; + + // Members regarding data in SYSMEM + NvU64 sysmemAddrOfSuspendResumeData; + NvU64 sizeOfSuspendResumeData; + + // + // Internal members for use by secure ucode + // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes. + // + NvU32 internal[32]; + + // Same as flags of GspFwWprMeta + NvU32 flags; + + // Subrevision number used by secure ucode + NvU32 subrevision; + + // + // Pad structure to exactly 256 bytes (1 DMA chunk). + // Padding initialized to zero. 
+ // + NvU32 padding[22]; +} GspFwSRMeta; + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20) + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u) + +#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u) + +#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u) + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \ + (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \ + (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA)) + +typedef struct GSP_FMC_INIT_PARAMS +{ + // CC initialization "registry keys" + NvU32 regkeys; +} GSP_FMC_INIT_PARAMS; + +typedef enum { + GSP_DMA_TARGET_LOCAL_FB, + GSP_DMA_TARGET_COHERENT_SYSTEM, + GSP_DMA_TARGET_NONCOHERENT_SYSTEM, + GSP_DMA_TARGET_COUNT +} GSP_DMA_TARGET; + +typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS +{ + // Physical memory aperture through which gspRmDescPa is accessed + GSP_DMA_TARGET target; + // Size in bytes of the GSP-RM descriptor structure + NvU32 gspRmDescSize; + // Physical offset in the target aperture of the GSP-RM descriptor structure + NvU64 gspRmDescOffset; + // Physical offset in FB to set the start of the WPR containing GSP-RM + NvU64 wprCarveoutOffset; + // Size in bytes of the WPR containing GSP-RM + NvU32 wprCarveoutSize; + // Whether to boot GSP-RM or GSP-Proxy through ACR + NvBool bIsGspRmBoot; +} GSP_ACR_BOOT_GSP_RM_PARAMS; + +typedef struct GSP_RM_PARAMS +{ + // Physical memory aperture through which bootArgsOffset is accessed + GSP_DMA_TARGET target; + // Physical offset in the memory aperture that will be passed to GSP-RM + NvU64 bootArgsOffset; +} GSP_RM_PARAMS; + +typedef struct GSP_SPDM_PARAMS +{ + // Physical Memory Aperture through which all addresses are accessed + GSP_DMA_TARGET target; + + // Physical offset in the memory aperture where SPDM payload is stored + NvU64 payloadBufferOffset; + + // Size of the above payload buffer + NvU32 payloadBufferSize; +} GSP_SPDM_PARAMS; + +typedef struct GSP_FMC_BOOT_PARAMS +{ + GSP_FMC_INIT_PARAMS initParams; + GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams; + GSP_RM_PARAMS gspRmParams; + GSP_SPDM_PARAMS gspSpdmParams; +} GSP_FMC_BOOT_PARAMS; + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+ +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h new file mode 100644 index 000000000000..e06643f57695 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_MSGFN_H__ +#define __NVRM_MSGFN_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#ifndef E +# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT, 0x1000) + E(GSP_INIT_DONE, 0x1001) + E(GSP_RUN_CPU_SEQUENCER, 0x1002) + E(POST_EVENT, 0x1003) + E(RC_TRIGGERED, 0x1004) + E(MMU_FAULT_QUEUED, 0x1005) + E(OS_ERROR_LOG, 0x1006) + E(RG_LINE_INTR, 0x1007) + E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008) + E(SIM_READ, 0x1009) + E(SIM_WRITE, 0x100a) + E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b) + E(UCODE_LIBOS_PRINT, 0x100c) + E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d) + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e) + E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f) + E(VGPU_CONFIG, 0x1010) + E(DISPLAY_MODESET, 0x1011) + E(EXTDEV_INTR_SERVICE, 0x1012) + E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013) + E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014) + E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015) + E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016) + E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017) + E(TIMED_SEMAPHORE_RELEASE, 0x1018) + E(NVLINK_IS_GPU_DEGRADED, 0x1019) + E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a) + E(NVLINK_FAULT_UP, 0x101b) + E(GSP_LOCKDOWN_NOTICE, 0x101c) + E(MIG_CI_CONFIG_UPDATE, 0x101d) + E(UPDATE_GSP_TRACE, 0x101e) + E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f) + E(GSP_POST_NOCAT_RECORD, 0x1020) + E(FECS_ERROR, 0x1021) + E(RECOVERY_ACTION, 0x1022) + E(NUM_EVENTS, 0x1023) +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h new file mode 100644 index 000000000000..fcaef7f553a6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_OFA_H__ +#define __NVRM_OFA_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? + NvU32 engineInstance; +} NV_OFA_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h new file mode 100644 index 000000000000..2d67b598c58b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_RPCFN_H__ +#define __NVRM_RPCFN_H__ +#include <nvrm/nvtypes.h> + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#ifndef X +# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP, 0) + X(RM, SET_GUEST_SYSTEM_INFO, 1) + X(RM, ALLOC_ROOT, 2) + X(RM, ALLOC_DEVICE, 3) // deprecated + X(RM, ALLOC_MEMORY, 4) + X(RM, ALLOC_CTX_DMA, 5) + X(RM, ALLOC_CHANNEL_DMA, 6) + X(RM, MAP_MEMORY, 7) + X(RM, BIND_CTX_DMA, 8) // deprecated + X(RM, ALLOC_OBJECT, 9) + X(RM, FREE, 10) + X(RM, LOG, 11) + X(RM, ALLOC_VIDMEM, 12) + X(RM, UNMAP_MEMORY, 13) + X(RM, MAP_MEMORY_DMA, 14) + X(RM, UNMAP_MEMORY_DMA, 15) + X(RM, GET_EDID, 16) // deprecated + X(RM, ALLOC_DISP_CHANNEL, 17) + X(RM, ALLOC_DISP_OBJECT, 18) + X(RM, ALLOC_SUBDEVICE, 19) + X(RM, ALLOC_DYNAMIC_MEMORY, 20) + X(RM, DUP_OBJECT, 21) + X(RM, IDLE_CHANNELS, 22) + X(RM, ALLOC_EVENT, 23) + X(RM, SEND_EVENT, 24) // deprecated + X(RM, REMAPPER_CONTROL, 25) // deprecated + X(RM, DMA_CONTROL, 26) // deprecated + X(RM, DMA_FILL_PTE_MEM, 27) + X(RM, MANAGE_HW_RESOURCE, 28) + X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated + X(RM, CREATE_FB_SEGMENT, 30) + X(RM, DESTROY_FB_SEGMENT, 31) + X(RM, ALLOC_SHARE_DEVICE, 32) + X(RM, DEFERRED_API_CONTROL, 33) + X(RM, REMOVE_DEFERRED_API, 34) + X(RM, SIM_ESCAPE_READ, 35) + X(RM, SIM_ESCAPE_WRITE, 36) + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37) + X(RM, FREE_VIDMEM_VIRT, 38) + X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated + X(RM, PERF_GET_PERFMON_SAMPLE, 40) + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated + X(RM, PERF_GET_LEVEL_INFO, 42) + X(RM, MAP_SEMA_MEMORY, 43) + X(RM, UNMAP_SEMA_MEMORY, 44) + X(RM, SET_SURFACE_PROPERTIES, 45) + X(RM, CLEANUP_SURFACE, 46) + X(RM, UNLOADING_GUEST_DRIVER, 47) + X(RM, TDR_SET_TIMEOUT_STATE, 48) + X(RM, SWITCH_TO_VGA, 49) + X(RM, GPU_EXEC_REG_OPS, 50) + X(RM, GET_STATIC_INFO, 51) + X(RM, ALLOC_VIRTMEM, 52) + X(RM, UPDATE_PDE_2, 53) + X(RM, SET_PAGE_DIRECTORY, 54) + X(RM, GET_STATIC_PSTATE_INFO, 55) + X(RM, TRANSLATE_GUEST_GPU_PTES, 56) + X(RM, RESERVED_57, 57) + X(RM, RESET_CURRENT_GR_CONTEXT, 58) + X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59) + X(RM, GET_ENGINE_UTILIZATION, 60) + X(RM, UPDATE_GPU_PDES, 61) + X(RM, GET_ENCODER_CAPACITY, 62) + X(RM, VGPU_PF_REG_READ32, 63) // deprecated + X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64) + X(GSP, GET_GSP_STATIC_INFO, 65) + X(RM, RMFS_INIT, 66) // deprecated + X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated + X(RM, RMFS_CLEANUP, 68) // deprecated + X(RM, RMFS_TEST, 69) // deprecated + X(RM, UPDATE_BAR_PDE, 70) + X(RM, CONTINUATION_RECORD, 71) + X(RM, GSP_SET_SYSTEM_INFO, 72) + X(RM, SET_REGISTRY, 73) + X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated + X(GSP, GSP_RM_CONTROL, 76) + X(RM, GET_STATIC_INFO2, 77) + X(RM, DUMP_PROTOBUF_COMPONENT, 78) + X(RM, UNSET_PAGE_DIRECTORY, 79) + X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated + X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE, 85) + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86) + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87) + X(RM, CTRL_RESET_CHANNEL, 88) + X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89) + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90) + X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91) + 
X(RM, CTRL_PERF_BOOST, 92) + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94) + X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95) + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96) + X(RM, CTRL_GPFIFO_SCHEDULE, 97) + X(RM, CTRL_SET_TIMESLICE, 98) + X(RM, CTRL_PREEMPT, 99) + X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100) + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101) + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102) + X(GSP, GSP_RM_ALLOC, 103) + X(RM, CTRL_GET_P2P_CAPS_V2, 104) + X(RM, CTRL_CIPHER_AES_ENCRYPT, 105) + X(RM, CTRL_CIPHER_SESSION_KEY, 106) + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107) + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108) + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109) + X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110) + X(RM, CTRL_GPU_PROMOTE_CTX, 111) + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112) + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113) + X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114) + X(RM, CTRL_GPU_INITIALIZE_CTX, 115) + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116) + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117) + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118) + X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119) + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120) + X(RM, CTRL_GET_CE_PCE_MASK, 121) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122) + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated + X(RM, CTRL_GET_NVLINK_STATUS, 124) + X(RM, CTRL_GET_P2P_CAPS, 125) + X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126) + X(RM, RESERVED_0, 127) + X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128) + X(RM, CTRL_RESERVE_HWPM_LEGACY, 129) + X(RM, CTRL_B0CC_EXEC_REG_OPS, 130) + X(RM, CTRL_BIND_PM_RESOURCES, 131) + X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132) + X(RM, CTRL_DBG_RESUME_CONTEXT, 133) + X(RM, CTRL_DBG_EXEC_REG_OPS, 134) + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135) + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136) + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137) + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138) + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139) + X(RM, CTRL_ALLOC_PMA_STREAM, 140) + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141) + X(RM, CTRL_FB_GET_INFO_V2, 142) + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143) + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144) + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145) + X(RM, CTRL_GPU_EVICT_CTX, 146) + X(RM, CTRL_FB_GET_FS_INFO, 147) + X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148) + X(RM, CTRL_STOP_CHANNEL, 149) + X(RM, CTRL_GR_PC_SAMPLING_MODE, 150) + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151) + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152) + X(RM, CTRL_FREE_PMA_STREAM, 153) + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154) + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155) + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156) + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157) + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158) + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159) + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160) + X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161) + X(UVM, UVM_PAGING_CHANNEL_MAP, 162) + X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163) + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164) + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165) + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166) + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167) + X(RM, DCE_RM_INIT, 168) + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169) + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170) + X(RM, GET_PLCABLE_ADDRESS_KIND, 171) + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172) + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173) + X(RM, CTRL_GET_MMU_DEBUG_MODE, 174) + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177) 
+ X(RM, DISABLE_CHANNELS, 178) + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179) + X(RM, CTRL_FABRIC_MEM_STATS, 180) + X(RM, SAVE_HIBERNATION_DATA, 181) + X(RM, RESTORE_HIBERNATION_DATA, 182) + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183) + X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184) + X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185) + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186) + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187) + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188) + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189) + X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190) + X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191) + X(RM, CTRL_BUS_SET_P2P_MAPPING, 192) + X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193) + X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194) + X(RM, CTRL_GPU_MIGRATABLE_OPS, 195) + X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196) + X(RM, CTRL_GET_HS_CREDITS, 197) + X(RM, CTRL_SET_HS_CREDITS, 198) + X(RM, CTRL_PM_AREA_PC_SAMPLER, 199) + X(RM, INVALIDATE_TLB, 200) + X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated + X(RM, ECC_NOTIFIER_WRITE_ACK, 202) + X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203) + X(RM, RM_API_CONTROL, 204) + X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205) + X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206) + X(RM, GET_STATIC_DATA, 207) + X(RM, RESERVED_208, 208) + X(RM, CTRL_GPU_GET_INFO_V2, 209) + X(RM, GET_BRAND_CAPS, 210) + X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211) + X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212) + X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213) + X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214) + X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215) + X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216) + X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217) + X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218) + X(RM, CTRL_RESERVE_HES, 219) + X(RM, CTRL_RELEASE_HES, 220) + X(RM, CTRL_RESERVE_CCU_PROF, 221) + X(RM, CTRL_RELEASE_CCU_PROF, 222) + X(RM, RESERVED, 223) + X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224) + X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225) + X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226) + X(RM, NUM_FUNCTIONS, 227) +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c new file mode 100644 index 000000000000..6fb3083edde3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include <rm/engine.h> + +#include "nvrm/ofa.h" + +static int +r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst, + struct nvkm_gsp_object *ofa) +{ + NV_OFA_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = inst; + + return nvkm_gsp_rm_alloc_wr(ofa, args); +} + +const struct nvkm_rm_api_engine +r570_ofa = { + .alloc = r570_ofa_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c new file mode 100644 index 000000000000..498658d0c60c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include <rm/rm.h> + +#include "nvrm/gsp.h" + +static const struct nvkm_rm_wpr +r570_wpr_libos2 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB, +}; + +static const struct nvkm_rm_wpr +r570_wpr_libos3 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, +}; + +static const struct nvkm_rm_wpr +r570_wpr_libos3_gh100 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x200000, + .offset_set_by_acr = true, +}; + +static const struct nvkm_rm_wpr +r570_wpr_libos3_gb10x = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x200000, + .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000), + .offset_set_by_acr = true, +}; + +static const struct nvkm_rm_wpr +r570_wpr_libos3_gb20x = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x220000, + .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000), + .offset_set_by_acr = true, +}; + +static const struct nvkm_rm_api +r570_api = { + .gsp = &r570_gsp, + .rpc = &r535_rpc, + .ctrl = &r535_ctrl, + .alloc = &r535_alloc, + .client = &r570_client, + .device = &r535_device, + .fbsr = &r570_fbsr, + .disp = &r570_disp, + .fifo = &r570_fifo, + .ce = &r535_ce, + .gr = &r570_gr, + .nvdec = &r535_nvdec, + .nvenc = &r535_nvenc, + .nvjpg = &r535_nvjpg, + .ofa = &r570_ofa, +}; + +const struct nvkm_rm_impl +r570_rm_tu102 = { + .wpr = &r570_wpr_libos2, + .api = &r570_api, +}; + +const struct nvkm_rm_impl +r570_rm_ga102 = { + .wpr = &r570_wpr_libos3, + .api = &r570_api, +}; + +const struct nvkm_rm_impl +r570_rm_gh100 = { + .wpr = &r570_wpr_libos3_gh100, + .api = &r570_api, +}; + +const struct nvkm_rm_impl +r570_rm_gb10x = { + .wpr = &r570_wpr_libos3_gb10x, + .api = &r570_api, +}; + +const struct nvkm_rm_impl +r570_rm_gb20x = { + .wpr = &r570_wpr_libos3_gb20x, + .api = &r570_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h new file mode 100644 index 000000000000..393ea775941f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include <subdev/gsp.h> +#ifndef __NVKM_RM_H__ +#define __NVKM_RM_H__ +#include "handles.h" +struct nvkm_outp; +struct r535_gr; + +struct nvkm_rm_impl { + const struct nvkm_rm_wpr *wpr; + const struct nvkm_rm_api *api; +}; + +struct nvkm_rm { + struct nvkm_device *device; + const struct nvkm_rm_gpu *gpu; + const struct nvkm_rm_wpr *wpr; + const struct nvkm_rm_api *api; +}; + +struct nvkm_rm_wpr { + u32 os_carveout_size; + u32 base_size; + u64 heap_size_min; + u32 heap_size_non_wpr; + u32 rsvd_size_pmu; + bool offset_set_by_acr; +}; + +struct nvkm_rm_api { + const struct nvkm_rm_api_gsp { + void (*set_rmargs)(struct nvkm_gsp *, bool resume); + int (*set_system_info)(struct nvkm_gsp *); + int (*get_static_info)(struct nvkm_gsp *); + bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); + void (*drop_send_user_shared_data)(struct nvkm_gsp *); + void (*drop_post_nocat_record)(struct nvkm_gsp *); + u32 (*sr_data_size)(struct nvkm_gsp *); + } *gsp; + + const struct nvkm_rm_api_rpc { + void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc); + void *(*push)(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc); + void (*done)(struct nvkm_gsp *gsp, void *repv); + } *rpc; + + const struct nvkm_rm_api_ctrl { + void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size); + int (*push)(struct nvkm_gsp_object *, void **params, u32 repc); + void (*done)(struct nvkm_gsp_object *, void *params); + } *ctrl; + + const struct nvkm_rm_api_alloc { + void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size); + void *(*push)(struct nvkm_gsp_object *, void *params); + void (*done)(struct nvkm_gsp_object *, void *params); + + int (*free)(struct nvkm_gsp_object *); + } *alloc; + + const struct nvkm_rm_api_client { + int (*ctor)(struct nvkm_gsp_client *, u32 handle); + } *client; + + const struct nvkm_rm_api_device { + int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *); + void (*dtor)(struct nvkm_gsp_device *); + + struct { + int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id, + nvkm_gsp_event_func, struct nvkm_gsp_event *); + void (*dtor)(struct nvkm_gsp_event *); + } event; + } *device; + + const struct nvkm_rm_api_fbsr { + int (*suspend)(struct nvkm_gsp *); + void (*resume)(struct nvkm_gsp *); + } *fbsr; + + const struct nvkm_rm_api_disp { + int (*get_static_info)(struct nvkm_disp *); + int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask); + int (*get_connect_state)(struct nvkm_disp *, unsigned display_id); + int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id); + + int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); + + struct { + int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm); + int (*set_indexed_link_rates)(struct nvkm_outp *); + } dp; + + struct { + int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst, + struct nvkm_memory *); + int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *); + } chan; + } *disp; + + const struct nvkm_rm_api_fifo { + int (*xlat_rm_engine_type)(u32 rm_engine_type, + enum nvkm_subdev_type *, int *nv2080_type); + int (*ectx_size)(struct nvkm_fifo *); + unsigned rsvd_chids; + int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc); + struct { + int (*alloc)(struct nvkm_gsp_device *, u32 handle, + u32 nv2080_engine_type, u8 runq, bool priv, int chid, + u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *, u64 gpfifo_offset, u32 
gpfifo_length, + struct nvkm_gsp_object *); + } chan; + } *fifo; + + const struct nvkm_rm_api_engine { + int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *); + } *ce, *nvdec, *nvenc, *nvjpg, *ofa; + + const struct nvkm_rm_api_gr { + int (*get_ctxbufs_info)(struct r535_gr *); + struct { + int (*init)(struct r535_gr *); + void (*fini)(struct r535_gr *); + } scrubber; + } *gr; +}; + +extern const struct nvkm_rm_impl r535_rm_tu102; +extern const struct nvkm_rm_impl r535_rm_ga102; +extern const struct nvkm_rm_api_gsp r535_gsp; +typedef struct DOD_METHOD_DATA DOD_METHOD_DATA; +typedef struct JT_METHOD_DATA JT_METHOD_DATA; +typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA; +void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *); +void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *); +void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *); +struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; +void r535_gsp_get_static_info_fb(struct nvkm_gsp *, + const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *); +extern const struct nvkm_rm_api_rpc r535_rpc; +extern const struct nvkm_rm_api_ctrl r535_ctrl; +extern const struct nvkm_rm_api_alloc r535_alloc; +extern const struct nvkm_rm_api_client r535_client; +void r535_gsp_client_dtor(struct nvkm_gsp_client *); +extern const struct nvkm_rm_api_device r535_device; +int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external); +void r535_mmu_vaspace_del(struct nvkm_vmm *); +extern const struct nvkm_rm_api_fbsr r535_fbsr; +void r535_fbsr_resume(struct nvkm_gsp *); +int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target, + u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *); +extern const struct nvkm_rm_api_disp r535_disp; +extern const struct nvkm_rm_api_fifo r535_fifo; +void r535_fifo_rc_chid(struct nvkm_fifo *, int chid); +extern const struct nvkm_rm_api_engine r535_ce; +extern const struct nvkm_rm_api_gr r535_gr; +void *r535_gr_dtor(struct nvkm_gr *); +int r535_gr_oneinit(struct nvkm_gr *); +u64 r535_gr_units(struct nvkm_gr *); +int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *, + struct nvkm_object **); +int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *, + struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma, + struct nvkm_gsp_object *chan); +extern const struct nvkm_rm_api_engine r535_nvdec; +extern const struct nvkm_rm_api_engine r535_nvenc; +extern const struct nvkm_rm_api_engine r535_nvjpg; +extern const struct nvkm_rm_api_engine r535_ofa; + +extern const struct nvkm_rm_impl r570_rm_tu102; +extern const struct nvkm_rm_impl r570_rm_ga102; +extern const struct nvkm_rm_impl r570_rm_gh100; +extern const struct nvkm_rm_impl r570_rm_gb10x; +extern const struct nvkm_rm_impl r570_rm_gb20x; +extern const struct nvkm_rm_api_gsp r570_gsp; +extern const struct nvkm_rm_api_client r570_client; +extern const struct nvkm_rm_api_fbsr r570_fbsr; +extern const struct nvkm_rm_api_disp r570_disp; +extern const struct nvkm_rm_api_fifo r570_fifo; +extern const struct nvkm_rm_api_gr r570_gr; +int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask); +int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask); +extern const struct nvkm_rm_api_engine r570_ofa; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h new file mode 100644 index 000000000000..4431e33b3304 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h @@ 
-0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_RPC_H__ +#define __NVKM_RM_RPC_H__ +#include "rm.h" + +#define to_payload_hdr(p, header) \ + container_of((void *)p, typeof(*header), params) + +int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn); + +struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len); +int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv); + +int r535_rpc_status_to_errno(uint32_t rpc_status); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c new file mode 100644 index 000000000000..423502f870db --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +#include <engine/fifo/priv.h> + +#include <nvif/class.h> + +const struct nvkm_rm_gpu +tu1xx_gpu = { + .disp.class = { + .root = TU102_DISP, + .caps = GV100_DISP_CAPS, + .core = TU102_DISP_CORE_CHANNEL_DMA, + .wndw = TU102_DISP_WINDOW_CHANNEL_DMA, + .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = TU102_DISP_CURSOR, + }, + + .usermode.class = TURING_USERMODE_A, + + .fifo.chan = { + .class = TURING_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, + }, + + .ce.class = TURING_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = TURING_A, + .compute = TURING_COMPUTE_A, + }, + .nvdec.class = NVC4B0_VIDEO_DECODER, + .nvenc.class = NVC4B7_VIDEO_ENCODER, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 59c5f2b9172a..58e233bc53b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -22,11 +22,45 @@ #include "priv.h" #include <subdev/fb.h> +#include <engine/sec2.h> + +#include <rm/r535/nvrm/gsp.h> #include <nvfw/flcn.h> #include <nvfw/fw.h> #include <nvfw/hs.h> +static int +tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + u32 wpr2_hi; + int ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (!wpr2_hi) { + nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); + return 0; + } + + ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (WARN_ON(ret)) + return ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (WARN_ON(wpr2_hi)) + return -EIO; + + return 0; +} + +static int +tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); +} + int tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob, struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) @@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp) return gsp->falcon.func->reset_eng(&gsp->falcon); } +int +tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + u32 mbox0 = 0xff, mbox1 = 0xff; + int ret; + + ret = r535_gsp_fini(gsp, suspend); + if (ret && suspend) + return ret; + + nvkm_falcon_reset(&gsp->falcon); + + ret = nvkm_gsp_fwsec_sb(gsp); + WARN_ON(ret); + + if (suspend) { + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + ret = 
tu102_gsp_booter_unload(gsp, mbox0, mbox1); + WARN_ON(ret); + return 0; +} + +int +tu102_gsp_init(struct nvkm_gsp *gsp) +{ + u32 mbox0, mbox1; + int ret; + + if (!gsp->sr.meta.data) { + mbox0 = lower_32_bits(gsp->wpr_meta.addr); + mbox1 = upper_32_bits(gsp->wpr_meta.addr); + } else { + gsp->rm->api->gsp->set_rmargs(gsp, true); + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + /* Execute booter to handle (eventually...) booting GSP-RM. */ + ret = tu102_gsp_booter_load(gsp, mbox0, mbox1); + if (WARN_ON(ret)) + return ret; + + return r535_gsp_init(gsp); +} + +static int +tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta); + if (ret) + return ret; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; + meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; + + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->gspFwRsvdStart = gsp->fb.heap.addr; + meta->nonWprHeapOffset = gsp->fb.heap.addr; + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwWprStart = gsp->fb.wpr2.addr; + meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; + meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; + meta->gspFwOffset = gsp->fb.wpr2.elf.addr; + meta->bootBinOffset = gsp->fb.wpr2.boot.addr; + meta->frtsOffset = gsp->fb.wpr2.frts.addr; + meta->frtsSize = gsp->fb.wpr2.frts.size; + meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); + meta->fbSize = gsp->fb.size; + meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->bootCount = 0; + meta->partitionRpcAddr = 0; + meta->partitionRpcRequestOffset = 0; + meta->partitionRpcReplyOffset = 0; + meta->verified = 0; + return 0; +} + +u64 +tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp) +{ + u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); + u64 heap_size; + + heap_size = gsp->rm->wpr->os_carveout_size + + gsp->rm->wpr->base_size + + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + + ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); + + return max(heap_size, gsp->rm->wpr->heap_size_min); +} + static u64 tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) { @@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) int tu102_gsp_oneinit(struct nvkm_gsp *gsp) { - gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device); + struct nvkm_device *device = gsp->subdev.device; + int ret; + + gsp->fb.size = nvkm_fb_vidmem_size(device); gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size); gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr; gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr; gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size; - return r535_gsp_oneinit(gsp); + ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, + &device->sec2->falcon, &gsp->booter.load); + if (ret) + return ret; + + ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, + &device->sec2->falcon, 
&gsp->booter.unload); + if (ret) + return ret; + + ret = r535_gsp_oneinit(gsp); + if (ret) + return ret; + + /* Calculate FB layout. */ + gsp->fb.wpr2.frts.size = 0x100000; + gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; + + gsp->fb.wpr2.boot.size = gsp->boot.fw.size; + gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); + + gsp->fb.wpr2.elf.size = gsp->fw.len; + gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); + + gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp); + + gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); + gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); + + gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); + gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; + + gsp->fb.heap.size = 0x100000; + gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; + + ret = tu102_gsp_wpr_meta_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_fwsec_frts(gsp); + if (WARN_ON(ret)) + return ret; + + /* Reset GSP into RISC-V mode. */ + ret = gsp->func->reset(gsp); + if (ret) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + return 0; } const struct nvkm_falcon_func @@ -163,29 +362,73 @@ tu102_gsp_flcn = { }; static const struct nvkm_gsp_func -tu102_gsp_r535_113_01 = { +tu102_gsp = { .flcn = &tu102_gsp_flcn, .fwsec = &tu102_gsp_fwsec, .sig_section = ".fwsignature_tu10x", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &tu1xx_gpu, }; +int +tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + bool enable_gsp = fwif->enable; + int ret; + +#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) + enable_gsp = true; +#endif + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) + return -EINVAL; + + ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm); + if (ret) + return ret; + + ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl); + if (ret) + return ret; + + return 0; +} + +int +tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + int ret; + + ret = tu102_gsp_load_rm(gsp, fwif); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload); + +done: + if (ret) + nvkm_gsp_dtor_fws(gsp); + + return ret; +} + static struct nvkm_gsp_fwif tu102_gsps[] = { - { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, + { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" }, + { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01); 
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index 04fbd9ed28b1..97eb046c25d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -22,29 +22,27 @@ #include "priv.h" static const struct nvkm_gsp_func -tu116_gsp_r535_113_01 = { +tu116_gsp = { .flcn = &tu102_gsp_flcn, .fwsec = &tu102_gsp_fwsec, .sig_section = ".fwsignature_tu11x", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &tu1xx_gpu, }; static struct nvkm_gsp_fwif tu116_gsps[] = { - { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, + { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" }, + { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild index 553d540f2736..fa7a2862dd1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -4,5 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o nvkm-y += nvkm/subdev/instmem/nv40.o nvkm-y += nvkm/subdev/instmem/nv50.o nvkm-y += nvkm/subdev/instmem/gk20a.o - -nvkm-y += nvkm/subdev/instmem/r535.o +nvkm-y += nvkm/subdev/instmem/gh100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index a2cd3330efc6..2f55bab8e132 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) int ret; if (suspend) { - ret = imem->func->suspend(imem); - if (ret) - return ret; + if (imem->func->suspend) { + ret = imem->func->suspend(imem); + if (ret) + return ret; + } imem->suspend = true; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c new file mode 100644 index 000000000000..8d8dd5f8a6c7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "priv.h" + +#include <nvhw/ref/gh100/pri_nv_xal_ep.h> + +static void +gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr) +{ + nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT); +} + +static const struct nvkm_instmem_func +gh100_instmem = { + .fini = nv50_instmem_fini, + .memory_new = nv50_instobj_new, + .memory_wrap = nv50_instobj_wrap, + .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr, +}; + +int +gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + return r535_instmem_new(&gh100_instmem, device, type, inst, pimem); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c index 6b462f960922..2544b9f0ec85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c @@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins struct nvkm_instmem **pimem) { struct nv40_instmem *imem; - int bar; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; @@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins *pimem = &imem->base; /* map bar */ - if (device->func->resource_size(device, 2)) - bar = 2; - else - bar = 3; - - imem->iomem = ioremap_wc(device->func->resource_addr(device, bar), - device->func->resource_size(device, bar)); + imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST), + device->func->resource_size(device, NVKM_BAR2_INST)); if (!imem->iomem) { nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n"); return -EFAULT; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index dd5b5a17ece0..4ca6fb30743d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) spin_lock_irqsave(&imem->base.lock, flags); if (unlikely(imem->addr != base)) { - nvkm_wr32(device, 0x001700, base >> 16); + imem->base.func->set_bar0_window_addr(device, base); imem->addr = base; } nvkm_wr32(device, 0x700000 + addr, data); @@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) spin_lock_irqsave(&imem->base.lock, flags); if (unlikely(imem->addr != base)) { - nvkm_wr32(device, 0x001700, base >> 16); + imem->base.func->set_bar0_window_addr(device, base); imem->addr = base; } data = nvkm_rd32(device, 0x700000 + addr); @@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) /* Make the mapping visible to the host. 
*/ iobj->bar = bar; - iobj->map = ioremap_wc(device->func->resource_addr(device, 3) + + iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) + (u32)iobj->bar->addr, size); if (!iobj->map) { nvkm_warn(subdev, "PRAMIN ioremap failed\n"); @@ -353,7 +353,7 @@ nv50_instobj_func = { .map = nv50_instobj_map, }; -static int +int nv50_instobj_wrap(struct nvkm_instmem *base, struct nvkm_memory *memory, struct nvkm_memory **pmemory) { @@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base, return 0; } -static int +int nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, struct nvkm_memory **pmemory) { @@ -395,6 +395,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, *****************************************************************************/ static void +nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr) +{ + nvkm_wr32(device, 0x001700, addr >> 16); +} + +void nv50_instmem_fini(struct nvkm_instmem *base) { nv50_instmem(base)->addr = ~0ULL; @@ -415,6 +421,7 @@ nv50_instmem = { .memory_new = nv50_instobj_new, .memory_wrap = nv50_instobj_wrap, .zero = false, + .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index 4c14c96fb60a..87bbdd786eaa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -16,10 +16,16 @@ struct nvkm_instmem_func { bool zero, struct nvkm_memory **); int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **); bool zero; + void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr); }; int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +void nv50_instmem_fini(struct nvkm_instmem *); +int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero, + struct nvkm_memory **); +int nv50_instobj_wrap(struct nvkm_instmem *, struct nvkm_memory *vram, + struct nvkm_memory **bar2); void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index 7ba35ea59c06..ea4848931540 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -15,8 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o nvkm-y += nvkm/subdev/mmu/gp10b.o nvkm-y += nvkm/subdev/mmu/gv100.o nvkm-y += nvkm/subdev/mmu/tu102.o - -nvkm-y += nvkm/subdev/mmu/r535.o +nvkm-y += nvkm/subdev/mmu/gh100.o nvkm-y += nvkm/subdev/mmu/mem.o nvkm-y += nvkm/subdev/mmu/memnv04.o @@ -38,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o nvkm-y += nvkm/subdev/mmu/vmmgp10b.o nvkm-y += nvkm/subdev/mmu/vmmgv100.o nvkm-y += nvkm/subdev/mmu/vmmtu102.o +nvkm-y += nvkm/subdev/mmu/vmmgh100.o nvkm-y += nvkm/subdev/mmu/umem.o nvkm-y += nvkm/subdev/mmu/ummu.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c new file mode 100644 index 000000000000..2918fb32cc91 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "mem.h" +#include "vmm.h" + +#include <nvif/class.h> + +static const struct nvkm_mmu_func +gh100_mmu = { + .dma_bits = 52, + .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, + .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, + .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new }, + .kind = tu102_mmu_kind, + .kind_sys = true, +}; + +int +gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) +{ + return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c index d9c9bee45222..160a5749a29f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c @@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if (ret) return ret; - *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr; *psize = (*pvma)->size; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c index 79a3b0cc9f5b..1e3db52de6cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c @@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) return ret; - *paddr = device->func->resource_addr(device, 1) + addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr; *psize = nvkm_memory_size(memory); *pvma = ERR_PTR(-ENODEV); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c index 46759b89fc1f..33b2321e9d87 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c @@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if (ret) return ret; - *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr; *psize = (*pvma)->size; return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm)); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index e9ca6537778c..90efef8f0b54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); +const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *); + struct nvkm_mmu_pt { union { struct nvkm_mmu_ptc *ptc; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index df662ce4a4b0..7acff3642e20 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -28,7 +28,7 @@ #include <nvif/class.h> -static const u8 * +const u8 * tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 9c97800fe037..f95c58b67633 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -19,7 +19,7 @@ 
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#define NVKM_VMM_LEVELS_MAX 5 +#define NVKM_VMM_LEVELS_MAX 6 #include "vmm.h" #include <subdev/fb.h> @@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm) struct nvkm_vma *vma; struct rb_node *node; - if (vmm->rm.client.gsp) { - nvkm_gsp_rm_free(&vmm->rm.object); - nvkm_gsp_device_dtor(&vmm->rm.device); - nvkm_gsp_client_dtor(&vmm->rm.client); - nvkm_vmm_put(vmm, &vmm->rm.rsvd); - } + if (vmm->rm.client.gsp) + r535_mmu_vaspace_del(vmm); if (0) nvkm_vmm_dump(vmm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h index f9bc30cdb2b3..4586a425dbe4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h @@ -143,6 +143,8 @@ struct nvkm_vmm_func { int (*aper)(enum nvkm_memory_target); int (*valid)(struct nvkm_vmm *, void *argv, u32 argc, struct nvkm_vmm_map *); + int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp, + struct nvkm_vmm_map *); void (*flush)(struct nvkm_vmm *, int depth); int (*mthd)(struct nvkm_vmm *, struct nvkm_client *, @@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr); int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *); +void tu102_vmm_flush(struct nvkm_vmm *, int depth); + int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, @@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); +int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, + struct lock_class_key *, const char *, + struct nvkm_vmm **); #define VMM_PRINT(l,v,p,f,a...) do { \ struct nvkm_vmm *_vmm = (v); \ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c new file mode 100644 index 000000000000..5614df3432da --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_mmu.h>
+
+static inline void
+gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+		  struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 data = addr | map->type;
+
+	while (ptes--) {
+		VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+		data += map->next;
+	}
+}
+
+static void
+gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+		  struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+		  struct nvkm_vmm_map *map)
+{
+	if (map->page->shift == PAGE_SHIFT) {
+		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+
+		nvkm_kmap(pt->memory);
+		while (ptes--) {
+			const u64 data = *map->dma++ | map->type;
+
+			VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+		}
+		nvkm_done(pt->memory);
+		return;
+	}
+
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+		  struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE);
+
+	VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_spt = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gh100_vmm_pgt_sparse,
+	.mem = gh100_vmm_pgt_mem,
+	.dma = gh100_vmm_pgt_dma,
+	.sgl = gh100_vmm_pgt_sgl,
+};
+
+static void
+gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE);
+
+	VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_lpt = {
+	.invalid = gh100_vmm_lpt_invalid,
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gh100_vmm_pgt_sparse,
+	.mem = gh100_vmm_pgt_mem,
+};
+
+static inline void
+gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 data = addr | map->type;
+
+	while (ptes--) {
+		VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL);
+		data += map->next;
+	}
+}
+
+static void
+gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
+}
+
+static inline bool
+gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+	switch (nvkm_memory_target(pt->memory)) {
+	case NVKM_MEM_TARGET_VRAM:
+		*data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY);
+		*data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED);
+		break;
+	case NVKM_MEM_TARGET_HOST:
+		*data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
+		*data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);
+		break;
+	case NVKM_MEM_TARGET_NCOH:
+		*data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY);
+		*data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED);
+		break;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+
+	*data |= pt->addr;
+	return true;
+}
+
+static void
+gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	struct nvkm_mmu_pt *pd = pgd->pt[0];
+	u64 data[2] = {};
+
+	if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0]))
+		return;
+	if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1]))
+		return;
+
+	nvkm_kmap(pd->memory);
+	VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]);
+	nvkm_done(pd->memory);
+}
+
+static void
+gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED);
+
+	VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes);
+}
+
+static void
+gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+		    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd0 = {
+	.unmap = gh100_vmm_pd0_unmap,
+	.sparse = gh100_vmm_pd0_sparse,
+	.pde = gh100_vmm_pd0_pde,
+	.mem = gh100_vmm_pd0_mem,
+};
+
+static void
+gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	struct nvkm_mmu_pt *pd = pgd->pt[0];
+	u64 data = 0;
+
+	if (!gh100_vmm_pde(pgt->pt[0], &data))
+		return;
+
+	nvkm_kmap(pd->memory);
+	VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data);
+	nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd1 = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gh100_vmm_pgt_sparse,
+	.pde = gh100_vmm_pd1_pde,
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_16[] = {
+	{ LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt },
+	{ PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{}
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_12[] = {
+	{ SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt },
+	{ PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{ PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+	{}
+};
+
+static int
+gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp,
+		struct nvkm_vmm_map *map)
+{
+	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+	const bool vol = target == NVKM_MEM_TARGET_HOST;
+	const struct nvkm_vmm_page *page = map->page;
+	u8 kind_inv, pcf;
+	int kindn, aper;
+	const u8 *kindm;
+
+	map->next = 1ULL << page->shift;
+	map->type = 0;
+
+	aper = vmm->func->aper(target);
+	if (WARN_ON(aper < 0))
+		return aper;
+
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+	if (kind >= kindn || kindm[kind] == kind_inv) {
+		VMM_DEBUG(vmm, "kind %02x", kind);
+		return -EINVAL;
+	}
+
+	if (priv) {
+		if (ro) {
+			if (vol)
+				pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD;
+			else
+				pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD;
+		} else {
+			if (vol)
+				pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD;
+			else
+				pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD;
+		}
+	} else {
+		if (ro) {
+			if (vol)
+				pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD;
+			else
+				pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD;
+		} else {
+			if (vol)
+				pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
+			else
+				pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD;
+		}
+	}
+
+	map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
+	map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
+	map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
+	map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
+	return 0;
+}
+
+static const struct nvkm_vmm_func
+gh100_vmm = {
+	.join = gv100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gp100_vmm_valid,
+	.valid2 = gh100_vmm_valid,
+	.flush = tu102_vmm_flush,
+	.page = {
+		{ 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx },
+		{ 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+		{ 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+		{ 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC },
+		{ 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+		{ 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+int
+gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
+{
+	return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size,
+			      argv, argc, key, name, pvmm);
+}
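A note on the valid path above: gh100_vmm_valid() folds the privilege, read-only and volatile attributes into the single VER3 PCF field, where older VER2 formats set independent VOL/PRIV/RO bits in the PTE. The nested if/else chain is really a three-key lookup; a minimal standalone sketch of that selection (the enum names and values here are placeholders, not the encoding from dev_mmu.h):

    #include <stdbool.h>

    enum pcf { /* placeholder names; the real values live in dev_mmu.h */
            PCF_PRIV_RO_UNCACHED, PCF_PRIV_RO_CACHED,
            PCF_PRIV_RW_UNCACHED, PCF_PRIV_RW_CACHED,
            PCF_REG_RO_UNCACHED,  PCF_REG_RO_CACHED,
            PCF_REG_RW_UNCACHED,  PCF_REG_RW_CACHED,
    };

    /* Equivalent of the nested if/else in gh100_vmm_valid(): one PCF per
     * (priv, ro, vol) combination, "vol" meaning an uncached host mapping. */
    static enum pcf
    pick_pcf(bool priv, bool ro, bool vol)
    {
            static const enum pcf table[2][2][2] = {
                    [1][1][1] = PCF_PRIV_RO_UNCACHED, [1][1][0] = PCF_PRIV_RO_CACHED,
                    [1][0][1] = PCF_PRIV_RW_UNCACHED, [1][0][0] = PCF_PRIV_RW_CACHED,
                    [0][1][1] = PCF_REG_RO_UNCACHED,  [0][1][0] = PCF_REG_RO_CACHED,
                    [0][0][1] = PCF_REG_RW_UNCACHED,  [0][0][0] = PCF_REG_RW_CACHED,
            };
            return table[priv][ro][vol];
    }

The hook-up that routes mappings into this function is the valid2 addition to gp100_vmm_valid(), next.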
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index bddac77f48f0..851fd847a2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 			return ret;
 	}
 
+	if (vmm->func->valid2)
+		return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
+
 	aper = vmm->func->aper(target);
 	if (WARN_ON(aper < 0))
 		return aper;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 8379e72d77ab..4b30eab40bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -23,7 +23,7 @@
 
 #include <subdev/timer.h>
 
-static void
+void
 tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
 	struct nvkm_device *device = vmm->mmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 174bdf995271..a14ea0f7b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o
 nvkm-y += nvkm/subdev/pci/gf106.o
 nvkm-y += nvkm/subdev/pci/gk104.o
 nvkm-y += nvkm/subdev/pci/gp100.o
+nvkm-y += nvkm/subdev/pci/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 5a0de45d36ce..6867934256a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device)
 u32
 nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
 {
-	return pci->func->rd32(pci, addr);
+	return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr);
 }
 
 void
 nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
 {
-	pci->func->wr08(pci, addr, data);
+	nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data);
 }
 
 void
 nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
 {
-	pci->func->wr32(pci, addr, data);
+	nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data);
 }
 
 u32
 nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
 {
-	u32 data = pci->func->rd32(pci, addr);
-	pci->func->wr32(pci, addr, (data & ~mask) | value);
+	u32 data = nvkm_pci_rd32(pci, addr);
+	nvkm_pci_wr32(pci, addr, (data & ~mask) | value);
 	return data;
 }
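With the per-chip rd32/wr08/wr32 hooks gone, every chip shares the arithmetic above: a config-space offset added to the chip's cfg.addr window inside BAR0. A usage sketch, assuming a probed struct nvkm_pci *pci (offset 0x00 is the standard PCI vendor/device ID dword; 0x68 is an illustrative offset, not a real register):

    /* Usage sketch: both helpers resolve to BAR0 accesses at
     * pci->func->cfg.addr + offset. */
    static void
    example_cfg_access(struct nvkm_pci *pci)
    {
            u32 id = nvkm_pci_rd32(pci, 0x00);

            nvkm_info(&pci->subdev, "vendor %04x device %04x\n",
                      id & 0xffff, id >> 16);

            /* read-modify-write through the same window; set bit 0 */
            nvkm_pci_mask(pci, 0x68, 0x00000001, 0x00000001);
    }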
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 5b29aacedef3..5308f6539a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci)
 
 static const struct nvkm_pci_func
 g84_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = nv46_pci_msi_rearm,
 
 	.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index a9e0674009c6..8ae7aa02e675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci)
 
 static const struct nvkm_pci_func
 g92_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = nv46_pci_msi_rearm,
 
 	.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 7bacd0693283..df745d0690ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -25,10 +25,9 @@
 
 static const struct nvkm_pci_func
 g94_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = nv40_pci_msi_rearm,
 
 	.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 099906092fe1..6ce941df87b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
 
 static const struct nvkm_pci_func
 gf100_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = gf100_pci_msi_rearm,
 
 	.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index bcde609ba866..712ca7e0959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -25,10 +25,9 @@
 
 static const struct nvkm_pci_func
 gf106_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = nv40_pci_msi_rearm,
 
 	.pcie.init = gf100_pcie_init,
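All dGPUs from G84 through GP100 shadow config space in the same 4KiB window at 0x088000, so these conversions stay mechanical: drop the three accessors, add one .cfg line. A hypothetical new entry would amount to (gxxx is illustrative, not a real chipset):

    static const struct nvkm_pci_func
    gxxx_pci_func = {
            .cfg = { .addr = 0x088000, .size = 0x1000 },
            .msi_rearm = nv40_pci_msi_rearm,
    };

GH100, in the new file below, instead derives its window from the hardware reference headers.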
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
new file mode 100644
index 000000000000..42da92d7a5fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_xtl_ep_pri.h>
+
+static void
+gh100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	/* Handled by top-level intr ACK. */
+}
+
+static const struct nvkm_pci_func
+gh100_pci = {
+	.cfg = {
+		.addr = DRF_LO(NV_EP_PCFGM),
+		.size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1,
+	},
+	.msi_rearm = gh100_pci_msi_rearm,
+};
+
+int
+gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+	      struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index 6be87ecffc89..ec6d0a7de995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
 
 static const struct nvkm_pci_func
 gk104_pci_func = {
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
+
 	.init = g84_pci_init,
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
 	.msi_rearm = nv40_pci_msi_rearm,
 
 	.pcie.init = gk104_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index a5fafda0014d..4204316a544f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci)
 
 static const struct nvkm_pci_func
 gp100_pci_func = {
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
 	.msi_rearm = gp100_pci_msi_rearm,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 9ab64194b185..b8a3f6850fa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -23,32 +23,9 @@
  */
 #include "priv.h"
 
-static u32
-nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	return nvkm_rd32(device, 0x001800 + addr);
-}
-
-static void
-nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	nvkm_wr08(device, 0x001800 + addr, data);
-}
-
-static void
-nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	nvkm_wr32(device, 0x001800 + addr, data);
-}
-
 static const struct nvkm_pci_func
 nv04_pci_func = {
-	.rd32 = nv04_pci_rd32,
-	.wr08 = nv04_pci_wr08,
-	.wr32 = nv04_pci_wr32,
+	.cfg = { .addr = 0x001800, .size = 0x1000 },
 };
 
 int
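Two details above are worth spelling out. First, GH100's window is not hard-coded: NV_EP_PCFGM is a ranged define from dev_xtl_ep_pri.h, and the DRF_LO()/DRF_HI() helpers extract its low and high bounds, so .size covers the whole aperture. Schematically, with an invented range:

    /* Invented example of a high:low ranged define and the DRF helpers:
     *   #define NV_HYPO_PCFGM  0x00920fff:0x00920000
     *   DRF_LO(NV_HYPO_PCFGM) == 0x00920000            window base
     *   DRF_HI(NV_HYPO_PCFGM) == 0x00920fff            last byte
     *   size = DRF_HI - DRF_LO + 1 == 0x1000
     */

Second, NV04 is the one chip whose shadow sits at 0x001800 rather than 0x088000, which is why the bespoke nv04_pci_* accessors existed in the first place; both layouts now collapse into the same base.c arithmetic.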
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6a3c31cf0200..1971dbbdeb2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -23,27 +23,6 @@
  */
 #include "priv.h"
 
-u32
-nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	return nvkm_rd32(device, 0x088000 + addr);
-}
-
-void
-nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	nvkm_wr08(device, 0x088000 + addr, data);
-}
-
-void
-nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
-	struct nvkm_device *device = pci->subdev.device;
-	nvkm_wr32(device, 0x088000 + addr, data);
-}
-
 void
 nv40_pci_msi_rearm(struct nvkm_pci *pci)
 {
@@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci)
 
 static const struct nvkm_pci_func
 nv40_pci_func = {
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
 	.msi_rearm = nv40_pci_msi_rearm,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 9cad17f178ec..0093eabac9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci)
 
 static const struct nvkm_pci_func
 nv46_pci_func = {
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
 	.msi_rearm = nv46_pci_msi_rearm,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 741e34bf307c..b445081bb80e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -25,9 +25,7 @@
 
 static const struct nvkm_pci_func
 nv4c_pci_func = {
-	.rd32 = nv40_pci_rd32,
-	.wr08 = nv40_pci_wr08,
-	.wr32 = nv40_pci_wr32,
+	.cfg = { .addr = 0x088000, .size = 0x1000 },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 9b7583532962..988eeee1471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_
 		  struct nvkm_pci **);
 
 struct nvkm_pci_func {
+	struct {
+		u32 addr;
+		u16 size;
+	} cfg;
+
 	void (*init)(struct nvkm_pci *);
-	u32 (*rd32)(struct nvkm_pci *, u16 addr);
-	void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
-	void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
 	void (*msi_rearm)(struct nvkm_pci *);
 
 	struct {
@@ -27,9 +29,6 @@ struct nvkm_pci_func {
 	} pcie;
 };
 
-u32 nv40_pci_rd32(struct nvkm_pci *, u16);
-void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
-void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
 void nv40_pci_msi_rearm(struct nvkm_pci *);
 void nv46_pci_msi_rearm(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
index dce337306cab..d294844d9eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <rm/gpu.h>
+
 static void
 r535_vfn_dtor(struct nvkm_vfn *vfn)
 {
@@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
 	     struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
 	     u32 addr, struct nvkm_vfn **pvfn)
 {
+	const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
 	struct nvkm_vfn_func *rm;
 	int ret;
 
@@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
 		return -ENOMEM;
 
 	rm->dtor = r535_vfn_dtor;
-	rm->intr = hw->intr;
-	rm->user = hw->user;
+	rm->intr = &tu102_vfn_intr;
+	rm->user.addr = 0x030000;
+	rm->user.size = 0x010000;
+	rm->user.base.minver = -1;
+	rm->user.base.maxver = -1;
+	rm->user.base.oclass = gpu->usermode.class;
 
 	ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
 	if (ret)
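The r535 constructor above follows the usual pattern for GSP-RM variants: clone the function table, override the fields RM owns, and free the clone from the registered destructor. Reduced to its essential shape (a simplified sketch, not the kernel code verbatim; the kfree() on the error path is an assumption about code cut off in this excerpt):

    struct nvkm_vfn_func *rm = kzalloc(sizeof(*rm), GFP_KERNEL);
    if (!rm)
            return -ENOMEM;

    rm->dtor = r535_vfn_dtor;           /* frees the clone at teardown */
    rm->intr = &tu102_vfn_intr;         /* Turing+ interrupt layout reused */
    rm->user.base.oclass = gpu->usermode.class; /* class from the RM GPU table */

    ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
    if (ret)
            kfree(rm);                  /* constructor failed; nothing owns it yet */

The runtime copy exists because the hw table is const and shared, while the RM variant now varies per GPU family via gpu->usermode.class.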
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
index c5460a14c541..4e64d8843373 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
 	struct nvkm_device *device = vfn->subdev.device;
 
-	*addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+	*addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
 	*size = vfn->func->user.size;
 	*type = NVKM_OBJECT_MAP_IO;
 	return 0;
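Finally, the uvfn hunk swaps the magic BAR index 0 for the symbolic NVKM_BAR0_PRI when resolving where the USERMODE window sits. Put together, the mapping handed to userspace is composed as follows (a sketch of the same computation; the 0x030000/0x010000 values are the GSP-RM ones set in r535_vfn_new() above):

    /* Composition of the mapping returned by nvkm_uvfn_map(): */
    u64 bar0 = device->func->resource_addr(device, NVKM_BAR0_PRI);
    *addr = bar0 + vfn->addr.user;      /* e.g. + 0x030000 under GSP-RM */
    *size = vfn->func->user.size;       /* e.g. 0x010000; mapped as IO memory */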