diff options
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/core')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/Kbuild | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/client.c | 130 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/engine.c | 85 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/event.c | 158 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 127 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/intr.c | 442 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 115 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/notify.c | 163 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/oproxy.c | 20 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 117 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/uevent.c | 157 |
11 files changed, 1045 insertions, 472 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild index 2b471ab585b4..e40712023c73 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild @@ -5,12 +5,13 @@ nvkm-y += nvkm/core/enum.o nvkm-y += nvkm/core/event.o nvkm-y += nvkm/core/firmware.o nvkm-y += nvkm/core/gpuobj.o +nvkm-y += nvkm/core/intr.o nvkm-y += nvkm/core/ioctl.o nvkm-y += nvkm/core/memory.o nvkm-y += nvkm/core/mm.o -nvkm-y += nvkm/core/notify.o nvkm-y += nvkm/core/object.o nvkm-y += nvkm/core/oproxy.o nvkm-y += nvkm/core/option.o nvkm-y += nvkm/core/ramht.o nvkm-y += nvkm/core/subdev.o +nvkm-y += nvkm/core/uevent.o diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index 0c8c55c73b12..ebdeb8eb9e77 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c @@ -23,7 +23,6 @@ */ #include <core/client.h> #include <core/device.h> -#include <core/notify.h> #include <core/option.h> #include <nvif/class.h> @@ -44,7 +43,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){ args->v0.name[sizeof(args->v0.name) - 1] = 0; ret = nvkm_client_new(args->v0.name, args->v0.device, NULL, - NULL, oclass->client->ntfy, &client); + NULL, oclass->client->event, &client); if (ret) return ret; } else @@ -68,113 +67,6 @@ nvkm_uclient_sclass = { .ctor = nvkm_uclient_new, }; -struct nvkm_client_notify { - struct nvkm_client *client; - struct nvkm_notify n; - u8 version; - u8 size; - union { - struct nvif_notify_rep_v0 v0; - } rep; -}; - -static int -nvkm_client_notify(struct nvkm_notify *n) -{ - struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); - struct nvkm_client *client = notify->client; - return client->ntfy(¬ify->rep, notify->size, n->data, n->size); -} - -int -nvkm_client_notify_put(struct nvkm_client *client, int index) -{ - 
if (index < ARRAY_SIZE(client->notify)) { - if (client->notify[index]) { - nvkm_notify_put(&client->notify[index]->n); - return 0; - } - } - return -ENOENT; -} - -int -nvkm_client_notify_get(struct nvkm_client *client, int index) -{ - if (index < ARRAY_SIZE(client->notify)) { - if (client->notify[index]) { - nvkm_notify_get(&client->notify[index]->n); - return 0; - } - } - return -ENOENT; -} - -int -nvkm_client_notify_del(struct nvkm_client *client, int index) -{ - if (index < ARRAY_SIZE(client->notify)) { - if (client->notify[index]) { - nvkm_notify_fini(&client->notify[index]->n); - kfree(client->notify[index]); - client->notify[index] = NULL; - return 0; - } - } - return -ENOENT; -} - -int -nvkm_client_notify_new(struct nvkm_object *object, - struct nvkm_event *event, void *data, u32 size) -{ - struct nvkm_client *client = object->client; - struct nvkm_client_notify *notify; - union { - struct nvif_notify_req_v0 v0; - } *req = data; - u8 index, reply; - int ret = -ENOSYS; - - for (index = 0; index < ARRAY_SIZE(client->notify); index++) { - if (!client->notify[index]) - break; - } - - if (index == ARRAY_SIZE(client->notify)) - return -ENOSPC; - - notify = kzalloc(sizeof(*notify), GFP_KERNEL); - if (!notify) - return -ENOMEM; - - nvif_ioctl(object, "notify new size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) { - nvif_ioctl(object, "notify new vers %d reply %d route %02x " - "token %llx\n", req->v0.version, - req->v0.reply, req->v0.route, req->v0.token); - notify->version = req->v0.version; - notify->size = sizeof(notify->rep.v0); - notify->rep.v0.version = req->v0.version; - notify->rep.v0.route = req->v0.route; - notify->rep.v0.token = req->v0.token; - reply = req->v0.reply; - } - - if (ret == 0) { - ret = nvkm_notify_init(object, event, nvkm_client_notify, - false, data, size, reply, ¬ify->n); - if (ret == 0) { - client->notify[index] = notify; - notify->client = client; - return index; - } - } - - kfree(notify); - return 
ret; -} - static const struct nvkm_object_func nvkm_client; struct nvkm_client * nvkm_client_search(struct nvkm_client *client, u64 handle) @@ -255,23 +147,13 @@ nvkm_client_child_get(struct nvkm_object *object, int index, static int nvkm_client_fini(struct nvkm_object *object, bool suspend) { - struct nvkm_client *client = nvkm_client(object); - const char *name[2] = { "fini", "suspend" }; - int i; - nvif_debug(object, "%s notify\n", name[suspend]); - for (i = 0; i < ARRAY_SIZE(client->notify); i++) - nvkm_client_notify_put(client, i); return 0; } static void * nvkm_client_dtor(struct nvkm_object *object) { - struct nvkm_client *client = nvkm_client(object); - int i; - for (i = 0; i < ARRAY_SIZE(client->notify); i++) - nvkm_client_notify_del(client, i); - return client; + return nvkm_client(object); } static const struct nvkm_object_func @@ -283,10 +165,8 @@ nvkm_client = { }; int -nvkm_client_new(const char *name, u64 device, const char *cfg, - const char *dbg, - int (*ntfy)(const void *, u32, const void *, u32), - struct nvkm_client **pclient) +nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg, + int (*event)(u64, void *, u32), struct nvkm_client **pclient) { struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass }; struct nvkm_client *client; @@ -300,7 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, client->device = device; client->debug = nvkm_dbgopt(dbg, "CLIENT"); client->objroot = RB_ROOT; - client->ntfy = ntfy; + client->event = event; INIT_LIST_HEAD(&client->umem); spin_lock_init(&client->lock); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index e41a39ae1597..36a31e9eea22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -35,16 +35,23 @@ nvkm_engine_chsw_load(struct nvkm_engine *engine) return false; } +int +nvkm_engine_reset(struct nvkm_engine *engine) +{ + if 
(engine->func->reset) + return engine->func->reset(engine); + + nvkm_subdev_fini(&engine->subdev, false); + return nvkm_subdev_init(&engine->subdev); +} + void nvkm_engine_unref(struct nvkm_engine **pengine) { struct nvkm_engine *engine = *pengine; + if (engine) { - if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) { - nvkm_subdev_fini(&engine->subdev, false); - engine->use.enabled = false; - mutex_unlock(&engine->use.mutex); - } + nvkm_subdev_unref(&engine->subdev); *pengine = NULL; } } @@ -53,21 +60,13 @@ struct nvkm_engine * nvkm_engine_ref(struct nvkm_engine *engine) { int ret; + if (engine) { - if (!refcount_inc_not_zero(&engine->use.refcount)) { - mutex_lock(&engine->use.mutex); - if (!refcount_inc_not_zero(&engine->use.refcount)) { - engine->use.enabled = true; - if ((ret = nvkm_subdev_init(&engine->subdev))) { - engine->use.enabled = false; - mutex_unlock(&engine->use.mutex); - return ERR_PTR(ret); - } - refcount_set(&engine->use.refcount, 1); - } - mutex_unlock(&engine->use.mutex); - } + ret = nvkm_subdev_ref(&engine->subdev); + if (ret) + return ERR_PTR(ret); } + return engine; } @@ -91,14 +90,10 @@ static int nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) { struct nvkm_engine *engine = nvkm_engine(subdev); - if (engine->func->info) { - if (!IS_ERR((engine = nvkm_engine_ref(engine)))) { - int ret = engine->func->info(engine, mthd, data); - nvkm_engine_unref(&engine); - return ret; - } - return PTR_ERR(engine); - } + + if (engine->func->info) + return engine->func->info(engine, mthd, data); + return -ENOSYS; } @@ -117,26 +112,6 @@ nvkm_engine_init(struct nvkm_subdev *subdev) struct nvkm_engine *engine = nvkm_engine(subdev); struct nvkm_fb *fb = subdev->device->fb; int ret = 0, i; - s64 time; - - if (!engine->use.enabled) { - nvkm_trace(subdev, "init skipped, engine has no users\n"); - return ret; - } - - if (engine->func->oneinit && !engine->subdev.oneinit) { - nvkm_trace(subdev, "one-time init running...\n"); - 
time = ktime_to_us(ktime_get()); - ret = engine->func->oneinit(engine); - if (ret) { - nvkm_trace(subdev, "one-time init failed, %d\n", ret); - return ret; - } - - engine->subdev.oneinit = true; - time = ktime_to_us(ktime_get()) - time; - nvkm_trace(subdev, "one-time init completed in %lldus\n", time); - } if (engine->func->init) ret = engine->func->init(engine); @@ -147,6 +122,17 @@ nvkm_engine_init(struct nvkm_subdev *subdev) } static int +nvkm_engine_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + + if (engine->func->oneinit) + return engine->func->oneinit(engine); + + return 0; +} + +static int nvkm_engine_preinit(struct nvkm_subdev *subdev) { struct nvkm_engine *engine = nvkm_engine(subdev); @@ -161,7 +147,6 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev) struct nvkm_engine *engine = nvkm_engine(subdev); if (engine->func->dtor) return engine->func->dtor(engine); - mutex_destroy(&engine->use.mutex); return engine; } @@ -169,6 +154,7 @@ const struct nvkm_subdev_func nvkm_engine = { .dtor = nvkm_engine_dtor, .preinit = nvkm_engine_preinit, + .oneinit = nvkm_engine_oneinit, .init = nvkm_engine_init, .fini = nvkm_engine_fini, .info = nvkm_engine_info, @@ -179,10 +165,9 @@ int nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine) { - nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev); engine->func = func; - refcount_set(&engine->use.refcount, 0); - mutex_init(&engine->use.mutex); + nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev); + refcount_set(&engine->subdev.use.refcount, 0); if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) { nvkm_debug(&engine->subdev, "disabled\n"); diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c index 006618d77aa4..a6c877135598 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/event.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/core/event.c @@ -20,54 +20,171 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include <core/event.h> -#include <core/notify.h> +#include <core/subdev.h> -void +static void nvkm_event_put(struct nvkm_event *event, u32 types, int index) { assert_spin_locked(&event->refs_lock); + + nvkm_trace(event->subdev, "event: decr %08x on %d\n", types, index); + while (types) { int type = __ffs(types); types &= ~(1 << type); if (--event->refs[index * event->types_nr + type] == 0) { + nvkm_trace(event->subdev, "event: blocking %d on %d\n", type, index); if (event->func->fini) event->func->fini(event, 1 << type, index); } } } -void +static void nvkm_event_get(struct nvkm_event *event, u32 types, int index) { assert_spin_locked(&event->refs_lock); + + nvkm_trace(event->subdev, "event: incr %08x on %d\n", types, index); + while (types) { int type = __ffs(types); types &= ~(1 << type); if (++event->refs[index * event->types_nr + type] == 1) { + nvkm_trace(event->subdev, "event: allowing %d on %d\n", type, index); if (event->func->init) event->func->init(event, 1 << type, index); } } } +static void +nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy) +{ + struct nvkm_event *event = ntfy->event; + unsigned long flags; + + nvkm_trace(event->subdev, "event: ntfy state changed\n"); + spin_lock_irqsave(&event->refs_lock, flags); + + if (atomic_read(&ntfy->allowed) != ntfy->running) { + if (ntfy->running) { + nvkm_event_put(ntfy->event, ntfy->bits, ntfy->id); + ntfy->running = false; + } else { + nvkm_event_get(ntfy->event, ntfy->bits, ntfy->id); + ntfy->running = true; + } + } + + spin_unlock_irqrestore(&event->refs_lock, flags); +} + +static void +nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy) +{ + spin_lock_irq(&ntfy->event->list_lock); + list_del_init(&ntfy->head); + spin_unlock_irq(&ntfy->event->list_lock); +} + +static void +nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy) +{ + spin_lock_irq(&ntfy->event->list_lock); + list_add_tail(&ntfy->head, 
&ntfy->event->ntfy); + spin_unlock_irq(&ntfy->event->list_lock); +} + +static void +nvkm_event_ntfy_block_(struct nvkm_event_ntfy *ntfy, bool wait) +{ + struct nvkm_subdev *subdev = ntfy->event->subdev; + + nvkm_trace(subdev, "event: ntfy block %08x on %d wait:%d\n", ntfy->bits, ntfy->id, wait); + + if (atomic_xchg(&ntfy->allowed, 0) == 1) { + nvkm_event_ntfy_state(ntfy); + if (wait) + nvkm_event_ntfy_remove(ntfy); + } +} + void -nvkm_event_send(struct nvkm_event *event, u32 types, int index, - void *data, u32 size) +nvkm_event_ntfy_block(struct nvkm_event_ntfy *ntfy) { - struct nvkm_notify *notify; + if (ntfy->event) + nvkm_event_ntfy_block_(ntfy, ntfy->wait); +} + +void +nvkm_event_ntfy_allow(struct nvkm_event_ntfy *ntfy) +{ + nvkm_trace(ntfy->event->subdev, "event: ntfy allow %08x on %d\n", ntfy->bits, ntfy->id); + + if (atomic_xchg(&ntfy->allowed, 1) == 0) { + nvkm_event_ntfy_state(ntfy); + if (ntfy->wait) + nvkm_event_ntfy_insert(ntfy); + } +} + +void +nvkm_event_ntfy_del(struct nvkm_event_ntfy *ntfy) +{ + struct nvkm_event *event = ntfy->event; + + if (!event) + return; + + nvkm_trace(event->subdev, "event: ntfy del %08x on %d\n", ntfy->bits, ntfy->id); + + nvkm_event_ntfy_block_(ntfy, false); + nvkm_event_ntfy_remove(ntfy); + ntfy->event = NULL; +} + +void +nvkm_event_ntfy_add(struct nvkm_event *event, int id, u32 bits, bool wait, nvkm_event_func func, + struct nvkm_event_ntfy *ntfy) +{ + nvkm_trace(event->subdev, "event: ntfy add %08x on %d wait:%d\n", id, bits, wait); + + ntfy->event = event; + ntfy->id = id; + ntfy->bits = bits; + ntfy->wait = wait; + ntfy->func = func; + atomic_set(&ntfy->allowed, 0); + ntfy->running = false; + INIT_LIST_HEAD(&ntfy->head); + if (!ntfy->wait) + nvkm_event_ntfy_insert(ntfy); +} + +bool +nvkm_event_ntfy_valid(struct nvkm_event *event, int id, u32 bits) +{ + return true; +} + +void +nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits) +{ + struct nvkm_event_ntfy *ntfy, *ntmp; unsigned long flags; - if (!event->refs || 
WARN_ON(index >= event->index_nr)) + if (!event->refs || WARN_ON(id >= event->index_nr)) return; + nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id); spin_lock_irqsave(&event->list_lock, flags); - list_for_each_entry(notify, &event->list, head) { - if (notify->index == index && (notify->types & types)) { - if (event->func->send) { - event->func->send(data, size, notify); - continue; - } - nvkm_notify_send(notify, data, size); + + list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) { + if (ntfy->id == id && ntfy->bits & bits) { + if (atomic_read(&ntfy->allowed)) + ntfy->func(ntfy, ntfy->bits & bits); } } + spin_unlock_irqrestore(&event->list_lock, flags); } @@ -81,20 +198,17 @@ nvkm_event_fini(struct nvkm_event *event) } int -nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, - struct nvkm_event *event) +__nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev, + int types_nr, int index_nr, struct nvkm_event *event) { - event->refs = kzalloc(array3_size(index_nr, types_nr, - sizeof(*event->refs)), - GFP_KERNEL); + event->refs = kzalloc(array3_size(index_nr, types_nr, sizeof(*event->refs)), GFP_KERNEL); if (!event->refs) return -ENOMEM; event->func = func; + event->subdev = subdev; event->types_nr = types_nr; event->index_nr = index_nr; - spin_lock_init(&event->refs_lock); - spin_lock_init(&event->list_lock); - INIT_LIST_HEAD(&event->list); + INIT_LIST_HEAD(&event->ntfy); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c index ca1f8463cff5..fcf2a002f6cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -22,6 +22,9 @@ #include <core/device.h> #include <core/firmware.h> +#include <subdev/fb.h> +#include <subdev/mmu.h> + int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base, const char *name, int ver, const struct firmware **pfw) @@ -107,3 +110,127 
@@ nvkm_firmware_put(const struct firmware *fw) { release_firmware(fw); } + +#define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory) + +static int +nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, + struct nvkm_vma *vma, void *argv, u32 argc) +{ + struct nvkm_firmware *fw = nvkm_firmware_mem(memory); + struct nvkm_vmm_map map = { + .memory = &fw->mem.memory, + .offset = offset, + .sgl = &fw->mem.sgl, + }; + + if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA)) + return -ENOSYS; + + return nvkm_vmm_map(vmm, vma, argv, argc, &map); +} + +static u64 +nvkm_firmware_mem_size(struct nvkm_memory *memory) +{ + return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl); +} + +static u64 +nvkm_firmware_mem_addr(struct nvkm_memory *memory) +{ + return nvkm_firmware_mem(memory)->phys; +} + +static u8 +nvkm_firmware_mem_page(struct nvkm_memory *memory) +{ + return PAGE_SHIFT; +} + +static enum nvkm_memory_target +nvkm_firmware_mem_target(struct nvkm_memory *memory) +{ + return NVKM_MEM_TARGET_HOST; +} + +static void * +nvkm_firmware_mem_dtor(struct nvkm_memory *memory) +{ + return NULL; +} + +static const struct nvkm_memory_func +nvkm_firmware_mem = { + .dtor = nvkm_firmware_mem_dtor, + .target = nvkm_firmware_mem_target, + .page = nvkm_firmware_mem_page, + .addr = nvkm_firmware_mem_addr, + .size = nvkm_firmware_mem_size, + .map = nvkm_firmware_mem_map, +}; + +void +nvkm_firmware_dtor(struct nvkm_firmware *fw) +{ + struct nvkm_memory *memory = &fw->mem.memory; + + if (!fw->img) + return; + + switch (fw->func->type) { + case NVKM_FIRMWARE_IMG_RAM: + kfree(fw->img); + break; + case NVKM_FIRMWARE_IMG_DMA: + nvkm_memory_unref(&memory); + dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys); + break; + default: + WARN_ON(1); + break; + } + + fw->img = NULL; +} + +int +nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name, + struct nvkm_device *device, const void *src, int len, 
struct nvkm_firmware *fw) +{ + fw->func = func; + fw->name = name; + fw->device = device; + fw->len = len; + + switch (fw->func->type) { + case NVKM_FIRMWARE_IMG_RAM: + fw->img = kmemdup(src, fw->len, GFP_KERNEL); + break; + case NVKM_FIRMWARE_IMG_DMA: { + dma_addr_t addr; + + len = ALIGN(fw->len, PAGE_SIZE); + + fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL); + if (fw->img) { + memcpy(fw->img, src, fw->len); + fw->phys = addr; + } + + sg_init_one(&fw->mem.sgl, fw->img, len); + sg_dma_address(&fw->mem.sgl) = fw->phys; + sg_dma_len(&fw->mem.sgl) = len; + } + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (!fw->img) + return -ENOMEM; + + nvkm_memory_ctor(&nvkm_firmware_mem, &fw->mem.memory); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/intr.c b/drivers/gpu/drm/nouveau/nvkm/core/intr.c new file mode 100644 index 000000000000..e20b7ca218c3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/intr.c @@ -0,0 +1,442 @@ +/* + * Copyright 2021 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <core/intr.h> +#include <core/device.h> +#include <core/subdev.h> +#include <subdev/pci.h> +#include <subdev/top.h> + +static int +nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr, + enum nvkm_intr_type type, int *leaf, u32 *mask) +{ + struct nvkm_device *device = subdev->device; + + if (type < NVKM_INTR_VECTOR_0) { + if (type == NVKM_INTR_SUBDEV) { + const struct nvkm_intr_data *data = intr->data; + struct nvkm_top_device *tdev; + + while (data && data->mask) { + if (data->type == NVKM_SUBDEV_TOP) { + list_for_each_entry(tdev, &device->top->device, head) { + if (tdev->intr >= 0 && + tdev->type == subdev->type && + tdev->inst == subdev->inst) { + if (data->mask & BIT(tdev->intr)) { + *leaf = data->leaf; + *mask = BIT(tdev->intr); + return 0; + } + } + } + } else + if (data->type == subdev->type && data->inst == subdev->inst) { + *leaf = data->leaf; + *mask = data->mask; + return 0; + } + + data++; + } + } else { + return -ENOSYS; + } + } else { + if (type < intr->leaves * sizeof(*intr->stat) * 8) { + *leaf = type / 32; + *mask = BIT(type % 32); + return 0; + } + } + + return -EINVAL; +} + +static struct nvkm_intr * +nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask) +{ + struct nvkm_intr *intr; + int ret; + + list_for_each_entry(intr, &subdev->device->intr.intr, head) { + ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask); + if (ret == 0) + return intr; + } + + return NULL; +} + +static void +nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask) +{ + intr->mask[leaf] |= mask; + if (intr->func->allow) { + if (intr->func->reset) + intr->func->reset(intr, leaf, mask); + intr->func->allow(intr, leaf, mask); + 
} +} + +void +nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type) +{ + struct nvkm_device *device = subdev->device; + struct nvkm_intr *intr; + unsigned long flags; + int leaf; + u32 mask; + + intr = nvkm_intr_find(subdev, type, &leaf, &mask); + if (intr) { + nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name); + spin_lock_irqsave(&device->intr.lock, flags); + nvkm_intr_allow_locked(intr, leaf, mask); + spin_unlock_irqrestore(&device->intr.lock, flags); + } +} + +static void +nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask) +{ + intr->mask[leaf] &= ~mask; + if (intr->func->block) + intr->func->block(intr, leaf, mask); +} + +void +nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type) +{ + struct nvkm_device *device = subdev->device; + struct nvkm_intr *intr; + unsigned long flags; + int leaf; + u32 mask; + + intr = nvkm_intr_find(subdev, type, &leaf, &mask); + if (intr) { + nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name); + spin_lock_irqsave(&device->intr.lock, flags); + nvkm_intr_block_locked(intr, leaf, mask); + spin_unlock_irqrestore(&device->intr.lock, flags); + } +} + +static void +nvkm_intr_rearm_locked(struct nvkm_device *device) +{ + struct nvkm_intr *intr; + + list_for_each_entry(intr, &device->intr.intr, head) + intr->func->rearm(intr); +} + +static void +nvkm_intr_unarm_locked(struct nvkm_device *device) +{ + struct nvkm_intr *intr; + + list_for_each_entry(intr, &device->intr.intr, head) + intr->func->unarm(intr); +} + +static irqreturn_t +nvkm_intr(int irq, void *arg) +{ + struct nvkm_device *device = arg; + struct nvkm_intr *intr; + struct nvkm_inth *inth; + irqreturn_t ret = IRQ_NONE; + bool pending = false; + int prio, leaf; + + /* Disable all top-level interrupt sources, and re-arm MSI interrupts. 
*/ + spin_lock(&device->intr.lock); + if (!device->intr.armed) + goto done_unlock; + + nvkm_intr_unarm_locked(device); + nvkm_pci_msi_rearm(device); + + /* Fetch pending interrupt masks. */ + list_for_each_entry(intr, &device->intr.intr, head) { + if (intr->func->pending(intr)) + pending = true; + } + + if (!pending) + goto done; + + /* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */ + if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff)) + goto done; + + /* Execute handlers. */ + for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) { + list_for_each_entry(inth, &device->intr.prio[prio], head) { + struct nvkm_intr *intr = inth->intr; + + if (intr->stat[inth->leaf] & inth->mask) { + if (atomic_read(&inth->allowed)) { + if (intr->func->reset) + intr->func->reset(intr, inth->leaf, inth->mask); + if (inth->func(inth) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + } + } + } + + /* Nothing handled? Some debugging/protection from IRQ storms is in order... */ + if (ret == IRQ_NONE) { + list_for_each_entry(intr, &device->intr.intr, head) { + for (leaf = 0; leaf < intr->leaves; leaf++) { + if (intr->stat[leaf]) { + nvkm_warn(intr->subdev, "intr%d: %08x\n", + leaf, intr->stat[leaf]); + nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]); + } + } + } + } + +done: + /* Re-enable all top-level interrupt sources. 
*/ + nvkm_intr_rearm_locked(device); +done_unlock: + spin_unlock(&device->intr.lock); + return ret; +} + +int +nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data, + struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr) +{ + struct nvkm_device *device = subdev->device; + int i; + + intr->func = func; + intr->data = data; + intr->subdev = subdev; + intr->leaves = leaves; + intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL); + intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL); + if (!intr->stat || !intr->mask) { + kfree(intr->stat); + return -ENOMEM; + } + + if (intr->subdev->debug >= NV_DBG_DEBUG) { + for (i = 0; i < intr->leaves; i++) + intr->mask[i] = ~0; + } + + spin_lock_irq(&device->intr.lock); + list_add_tail(&intr->head, &device->intr.intr); + spin_unlock_irq(&device->intr.lock); + return 0; +} + +static irqreturn_t +nvkm_intr_subdev(struct nvkm_inth *inth) +{ + struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth); + + nvkm_subdev_intr(subdev); + return IRQ_HANDLED; +} + +static void +nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst) +{ + struct nvkm_subdev *subdev; + enum nvkm_intr_prio prio; + int ret; + + subdev = nvkm_device_subdev(intr->subdev->device, type, inst); + if (!subdev || !subdev->func->intr) + return; + + if (type == NVKM_ENGINE_DISP) + prio = NVKM_INTR_PRIO_VBLANK; + else + prio = NVKM_INTR_PRIO_NORMAL; + + ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth); + if (WARN_ON(ret)) + return; + + nvkm_inth_allow(&subdev->inth); +} + +static void +nvkm_intr_subdev_add(struct nvkm_intr *intr) +{ + const struct nvkm_intr_data *data; + struct nvkm_device *device = intr->subdev->device; + struct nvkm_top_device *tdev; + + for (data = intr->data; data && data->mask; data++) { + if (data->legacy) { + if (data->type == NVKM_SUBDEV_TOP) { + list_for_each_entry(tdev, &device->top->device, head) { + if 
(tdev->intr < 0 || !(data->mask & BIT(tdev->intr))) + continue; + + nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst); + } + } else { + nvkm_intr_subdev_add_dev(intr, data->type, data->inst); + } + } + } +} + +void +nvkm_intr_rearm(struct nvkm_device *device) +{ + struct nvkm_intr *intr; + int i; + + if (unlikely(!device->intr.legacy_done)) { + list_for_each_entry(intr, &device->intr.intr, head) + nvkm_intr_subdev_add(intr); + device->intr.legacy_done = true; + } + + spin_lock_irq(&device->intr.lock); + list_for_each_entry(intr, &device->intr.intr, head) { + for (i = 0; intr->func->block && i < intr->leaves; i++) { + intr->func->block(intr, i, ~0); + intr->func->allow(intr, i, intr->mask[i]); + } + } + + nvkm_intr_rearm_locked(device); + device->intr.armed = true; + spin_unlock_irq(&device->intr.lock); +} + +void +nvkm_intr_unarm(struct nvkm_device *device) +{ + spin_lock_irq(&device->intr.lock); + nvkm_intr_unarm_locked(device); + device->intr.armed = false; + spin_unlock_irq(&device->intr.lock); +} + +int +nvkm_intr_install(struct nvkm_device *device) +{ + int ret; + + device->intr.irq = device->func->irq(device); + if (device->intr.irq < 0) + return device->intr.irq; + + ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device); + if (ret) + return ret; + + device->intr.alloc = true; + return 0; +} + +void +nvkm_intr_dtor(struct nvkm_device *device) +{ + struct nvkm_intr *intr, *intt; + + list_for_each_entry_safe(intr, intt, &device->intr.intr, head) { + list_del(&intr->head); + kfree(intr->mask); + kfree(intr->stat); + } + + if (device->intr.alloc) + free_irq(device->intr.irq, device); +} + +void +nvkm_intr_ctor(struct nvkm_device *device) +{ + int i; + + INIT_LIST_HEAD(&device->intr.intr); + for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++) + INIT_LIST_HEAD(&device->intr.prio[i]); + + spin_lock_init(&device->intr.lock); + device->intr.armed = false; +} + +void +nvkm_inth_block(struct nvkm_inth *inth) +{ + if (unlikely(!inth->intr)) + 
return; + + atomic_set(&inth->allowed, 0); +} + +void +nvkm_inth_allow(struct nvkm_inth *inth) +{ + struct nvkm_intr *intr = inth->intr; + unsigned long flags; + + if (unlikely(!inth->intr)) + return; + + spin_lock_irqsave(&intr->subdev->device->intr.lock, flags); + if (!atomic_xchg(&inth->allowed, 1)) { + if ((intr->mask[inth->leaf] & inth->mask) != inth->mask) + nvkm_intr_allow_locked(intr, inth->leaf, inth->mask); + } + spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags); +} + +int +nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio, + struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth) +{ + struct nvkm_device *device = subdev->device; + int ret; + + if (WARN_ON(inth->mask)) + return -EBUSY; + + ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask); + if (ret) + return ret; + + nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n", + inth->leaf, inth->mask, subdev->name); + + inth->intr = intr; + inth->func = func; + atomic_set(&inth->allowed, 0); + list_add_tail(&inth->head, &device->intr.prio[prio]); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index 45f920da89af..0b33287e43a7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -47,6 +47,26 @@ nvkm_ioctl_nop(struct nvkm_client *client, return ret; } +#include <nvif/class.h> + +static int +nvkm_ioctl_sclass_(struct nvkm_object *object, int index, struct nvkm_oclass *oclass) +{ + if ( object->func->uevent && + !object->func->uevent(object, NULL, 0, NULL) && index-- == 0) { + oclass->ctor = nvkm_uevent_new; + oclass->base.minver = 0; + oclass->base.maxver = 0; + oclass->base.oclass = NVIF_CLASS_EVENT; + return 0; + } + + if (object->func->sclass) + return object->func->sclass(object, index, oclass); + + return -ENOSYS; +} + static int nvkm_ioctl_sclass(struct nvkm_client *client, struct nvkm_object 
*object, void *data, u32 size) @@ -64,8 +84,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client, if (size != args->v0.count * sizeof(args->v0.oclass[0])) return -EINVAL; - while (object->func->sclass && - object->func->sclass(object, i, &oclass) >= 0) { + while (nvkm_ioctl_sclass_(object, i, &oclass) >= 0) { if (i < args->v0.count) { args->v0.oclass[i].oclass = oclass.base.oclass; args->v0.oclass[i].minver = oclass.base.minver; @@ -100,7 +119,7 @@ nvkm_ioctl_new(struct nvkm_client *client, } else return ret; - if (!parent->func->sclass) { + if (!parent->func->sclass && !parent->func->uevent) { nvif_ioctl(parent, "cannot have children\n"); return -EINVAL; } @@ -113,7 +132,7 @@ nvkm_ioctl_new(struct nvkm_client *client, oclass.object = args->v0.object; oclass.client = client; oclass.parent = parent; - ret = parent->func->sclass(parent, i++, &oclass); + ret = nvkm_ioctl_sclass_(parent, i++, &oclass); if (ret) return ret; } while (oclass.base.oclass != args->v0.oclass); @@ -294,90 +313,6 @@ nvkm_ioctl_unmap(struct nvkm_client *client, return ret; } -static int -nvkm_ioctl_ntfy_new(struct nvkm_client *client, - struct nvkm_object *object, void *data, u32 size) -{ - union { - struct nvif_ioctl_ntfy_new_v0 v0; - } *args = data; - struct nvkm_event *event; - int ret = -ENOSYS; - - nvif_ioctl(object, "ntfy new size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { - nvif_ioctl(object, "ntfy new vers %d event %02x\n", - args->v0.version, args->v0.event); - ret = nvkm_object_ntfy(object, args->v0.event, &event); - if (ret == 0) { - ret = nvkm_client_notify_new(object, event, data, size); - if (ret >= 0) { - args->v0.index = ret; - ret = 0; - } - } - } - - return ret; -} - -static int -nvkm_ioctl_ntfy_del(struct nvkm_client *client, - struct nvkm_object *object, void *data, u32 size) -{ - union { - struct nvif_ioctl_ntfy_del_v0 v0; - } *args = data; - int ret = -ENOSYS; - - nvif_ioctl(object, "ntfy del size %d\n", size); - if (!(ret = 
nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(object, "ntfy del vers %d index %d\n", - args->v0.version, args->v0.index); - ret = nvkm_client_notify_del(client, args->v0.index); - } - - return ret; -} - -static int -nvkm_ioctl_ntfy_get(struct nvkm_client *client, - struct nvkm_object *object, void *data, u32 size) -{ - union { - struct nvif_ioctl_ntfy_get_v0 v0; - } *args = data; - int ret = -ENOSYS; - - nvif_ioctl(object, "ntfy get size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(object, "ntfy get vers %d index %d\n", - args->v0.version, args->v0.index); - ret = nvkm_client_notify_get(client, args->v0.index); - } - - return ret; -} - -static int -nvkm_ioctl_ntfy_put(struct nvkm_client *client, - struct nvkm_object *object, void *data, u32 size) -{ - union { - struct nvif_ioctl_ntfy_put_v0 v0; - } *args = data; - int ret = -ENOSYS; - - nvif_ioctl(object, "ntfy put size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(object, "ntfy put vers %d index %d\n", - args->v0.version, args->v0.index); - ret = nvkm_client_notify_put(client, args->v0.index); - } - - return ret; -} - static struct { int version; int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32); @@ -392,10 +327,6 @@ nvkm_ioctl_v0[] = { { 0x00, nvkm_ioctl_wr }, { 0x00, nvkm_ioctl_map }, { 0x00, nvkm_ioctl_unmap }, - { 0x00, nvkm_ioctl_ntfy_new }, - { 0x00, nvkm_ioctl_ntfy_del }, - { 0x00, nvkm_ioctl_ntfy_get }, - { 0x00, nvkm_ioctl_ntfy_put }, }; static int diff --git a/drivers/gpu/drm/nouveau/nvkm/core/notify.c b/drivers/gpu/drm/nouveau/nvkm/core/notify.c deleted file mode 100644 index 023610d01458..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/core/notify.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2014 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs <bskeggs@redhat.com> - */ -#include <core/notify.h> -#include <core/event.h> - -static inline void -nvkm_notify_put_locked(struct nvkm_notify *notify) -{ - if (notify->block++ == 0) - nvkm_event_put(notify->event, notify->types, notify->index); -} - -void -nvkm_notify_put(struct nvkm_notify *notify) -{ - struct nvkm_event *event = notify->event; - unsigned long flags; - if (likely(event) && - test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) { - spin_lock_irqsave(&event->refs_lock, flags); - nvkm_notify_put_locked(notify); - spin_unlock_irqrestore(&event->refs_lock, flags); - if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) - flush_work(&notify->work); - } -} - -static inline void -nvkm_notify_get_locked(struct nvkm_notify *notify) -{ - if (--notify->block == 0) - nvkm_event_get(notify->event, notify->types, notify->index); -} - -void -nvkm_notify_get(struct nvkm_notify *notify) -{ - struct nvkm_event *event = notify->event; - unsigned long flags; - if (likely(event) && - !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) { - spin_lock_irqsave(&event->refs_lock, flags); - nvkm_notify_get_locked(notify); - spin_unlock_irqrestore(&event->refs_lock, flags); - } -} - -static inline void -nvkm_notify_func(struct nvkm_notify *notify) -{ - struct nvkm_event *event = notify->event; - int ret = notify->func(notify); - unsigned long flags; - if ((ret == NVKM_NOTIFY_KEEP) || - !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) { - spin_lock_irqsave(&event->refs_lock, flags); - nvkm_notify_get_locked(notify); - spin_unlock_irqrestore(&event->refs_lock, flags); - } -} - -static void -nvkm_notify_work(struct work_struct *work) -{ - struct nvkm_notify *notify = container_of(work, typeof(*notify), work); - nvkm_notify_func(notify); -} - -void -nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) -{ - struct nvkm_event *event = notify->event; - unsigned long flags; - - assert_spin_locked(&event->list_lock); - BUG_ON(size != notify->size); - - 
spin_lock_irqsave(&event->refs_lock, flags); - if (notify->block) { - spin_unlock_irqrestore(&event->refs_lock, flags); - return; - } - nvkm_notify_put_locked(notify); - spin_unlock_irqrestore(&event->refs_lock, flags); - - if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) { - memcpy((void *)notify->data, data, size); - schedule_work(&notify->work); - } else { - notify->data = data; - nvkm_notify_func(notify); - notify->data = NULL; - } -} - -void -nvkm_notify_fini(struct nvkm_notify *notify) -{ - unsigned long flags; - if (notify->event) { - nvkm_notify_put(notify); - spin_lock_irqsave(&notify->event->list_lock, flags); - list_del(&notify->head); - spin_unlock_irqrestore(&notify->event->list_lock, flags); - kfree((void *)notify->data); - notify->event = NULL; - } -} - -int -nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, - int (*func)(struct nvkm_notify *), bool work, - void *data, u32 size, u32 reply, - struct nvkm_notify *notify) -{ - unsigned long flags; - int ret = -ENODEV; - if ((notify->event = event), event->refs) { - ret = event->func->ctor(object, data, size, notify); - if (ret == 0 && (ret = -EINVAL, notify->size == reply)) { - notify->flags = 0; - notify->block = 1; - notify->func = func; - notify->data = NULL; - if (ret = 0, work) { - INIT_WORK(&notify->work, nvkm_notify_work); - set_bit(NVKM_NOTIFY_WORK, &notify->flags); - notify->data = kmalloc(reply, GFP_KERNEL); - if (!notify->data) - ret = -ENOMEM; - } - } - if (ret == 0) { - spin_lock_irqsave(&event->list_lock, flags); - list_add_tail(&notify->head, &event->list); - spin_unlock_irqrestore(&event->list_lock, flags); - } - } - if (ret) - notify->event = NULL; - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c index 16299837a296..3385528da650 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c @@ -47,7 +47,12 @@ nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc, static 
int nvkm_oproxy_unmap(struct nvkm_object *object) { - return nvkm_object_unmap(nvkm_oproxy(object)->object); + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + + if (unlikely(!oproxy->object)) + return 0; + + return nvkm_object_unmap(oproxy->object); } static int @@ -106,6 +111,18 @@ nvkm_oproxy_sclass(struct nvkm_object *object, int index, } static int +nvkm_oproxy_uevent(struct nvkm_object *object, void *argv, u32 argc, + struct nvkm_uevent *uevent) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + + if (!oproxy->object->func->uevent) + return -ENOSYS; + + return oproxy->object->func->uevent(oproxy->object, argv, argc, uevent); +} + +static int nvkm_oproxy_fini(struct nvkm_object *object, bool suspend) { struct nvkm_oproxy *oproxy = nvkm_oproxy(object); @@ -188,6 +205,7 @@ nvkm_oproxy_func = { .wr32 = nvkm_oproxy_wr32, .bind = nvkm_oproxy_bind, .sclass = nvkm_oproxy_sclass, + .uevent = nvkm_oproxy_uevent, }; void diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index a74b7acb6832..6c20e827a069 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -54,7 +54,7 @@ int nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_device *device = subdev->device; - const char *action = suspend ? "suspend" : "fini"; + const char *action = suspend ? "suspend" : subdev->use.enabled ? 
"fini" : "reset"; s64 time; nvkm_trace(subdev, "%s running...\n", action); @@ -68,6 +68,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) return ret; } } + subdev->use.enabled = false; nvkm_mc_reset(device, subdev->type, subdev->inst); @@ -97,30 +98,49 @@ nvkm_subdev_preinit(struct nvkm_subdev *subdev) return 0; } -int -nvkm_subdev_init(struct nvkm_subdev *subdev) +static int +nvkm_subdev_oneinit_(struct nvkm_subdev *subdev) { s64 time; int ret; - nvkm_trace(subdev, "init running...\n"); + if (!subdev->func->oneinit || subdev->oneinit) + return 0; + + nvkm_trace(subdev, "one-time init running...\n"); time = ktime_to_us(ktime_get()); + ret = subdev->func->oneinit(subdev); + if (ret) { + nvkm_error(subdev, "one-time init failed, %d\n", ret); + return ret; + } - if (subdev->func->oneinit && !subdev->oneinit) { - s64 time; - nvkm_trace(subdev, "one-time init running...\n"); - time = ktime_to_us(ktime_get()); - ret = subdev->func->oneinit(subdev); - if (ret) { - nvkm_error(subdev, "one-time init failed, %d\n", ret); - return ret; - } + subdev->oneinit = true; + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "one-time init completed in %lldus\n", time); + return 0; +} - subdev->oneinit = true; - time = ktime_to_us(ktime_get()) - time; - nvkm_trace(subdev, "one-time init completed in %lldus\n", time); +static int +nvkm_subdev_init_(struct nvkm_subdev *subdev) +{ + s64 time; + int ret; + + if (subdev->use.enabled) { + nvkm_trace(subdev, "init skipped, already running\n"); + return 0; } + nvkm_trace(subdev, "init running...\n"); + time = ktime_to_us(ktime_get()); + + ret = nvkm_subdev_oneinit_(subdev); + if (ret) + return ret; + + subdev->use.enabled = true; + if (subdev->func->init) { ret = subdev->func->init(subdev); if (ret) { @@ -134,6 +154,64 @@ nvkm_subdev_init(struct nvkm_subdev *subdev) return 0; } +int +nvkm_subdev_init(struct nvkm_subdev *subdev) +{ + int ret; + + mutex_lock(&subdev->use.mutex); + if 
(refcount_read(&subdev->use.refcount) == 0) { + nvkm_trace(subdev, "init skipped, no users\n"); + mutex_unlock(&subdev->use.mutex); + return 0; + } + + ret = nvkm_subdev_init_(subdev); + mutex_unlock(&subdev->use.mutex); + return ret; +} + +int +nvkm_subdev_oneinit(struct nvkm_subdev *subdev) +{ + int ret; + + mutex_lock(&subdev->use.mutex); + ret = nvkm_subdev_oneinit_(subdev); + mutex_unlock(&subdev->use.mutex); + return ret; +} + +void +nvkm_subdev_unref(struct nvkm_subdev *subdev) +{ + if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) { + nvkm_subdev_fini(subdev, false); + mutex_unlock(&subdev->use.mutex); + } +} + +int +nvkm_subdev_ref(struct nvkm_subdev *subdev) +{ + int ret; + + if (subdev && !refcount_inc_not_zero(&subdev->use.refcount)) { + mutex_lock(&subdev->use.mutex); + if (!refcount_inc_not_zero(&subdev->use.refcount)) { + if ((ret = nvkm_subdev_init_(subdev))) { + mutex_unlock(&subdev->use.mutex); + return ret; + } + + refcount_set(&subdev->use.refcount, 1); + } + mutex_unlock(&subdev->use.mutex); + } + + return 0; +} + void nvkm_subdev_del(struct nvkm_subdev **psubdev) { @@ -146,6 +224,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev) list_del(&subdev->head); if (subdev->func->dtor) *psubdev = subdev->func->dtor(subdev); + mutex_destroy(&subdev->use.mutex); time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "destroy completed in %lldus\n", time); kfree(*psubdev); @@ -167,8 +246,8 @@ nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int } void -nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev) +__nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev) { subdev->func = func; subdev->device = device; @@ -180,6 +259,8 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device 
else strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name)); subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name); + + refcount_set(&subdev->use.refcount, 1); list_add_tail(&subdev->head, &device->subdev); } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c new file mode 100644 index 000000000000..ba9d9edaec75 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c @@ -0,0 +1,157 @@ +/* + * Copyright 2021 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#define nvkm_uevent(p) container_of((p), struct nvkm_uevent, object) +#include <core/event.h> +#include <core/client.h> + +#include <nvif/if000e.h> + +struct nvkm_uevent { + struct nvkm_object object; + struct nvkm_object *parent; + nvkm_uevent_func func; + bool wait; + + struct nvkm_event_ntfy ntfy; + atomic_t allowed; +}; + +static int +nvkm_uevent_mthd_block(struct nvkm_uevent *uevent, union nvif_event_block_args *args, u32 argc) +{ + if (argc != sizeof(args->vn)) + return -ENOSYS; + + nvkm_event_ntfy_block(&uevent->ntfy); + atomic_set(&uevent->allowed, 0); + return 0; +} + +static int +nvkm_uevent_mthd_allow(struct nvkm_uevent *uevent, union nvif_event_allow_args *args, u32 argc) +{ + if (argc != sizeof(args->vn)) + return -ENOSYS; + + nvkm_event_ntfy_allow(&uevent->ntfy); + atomic_set(&uevent->allowed, 1); + return 0; +} + +static int +nvkm_uevent_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc) +{ + struct nvkm_uevent *uevent = nvkm_uevent(object); + + switch (mthd) { + case NVIF_EVENT_V0_ALLOW: return nvkm_uevent_mthd_allow(uevent, argv, argc); + case NVIF_EVENT_V0_BLOCK: return nvkm_uevent_mthd_block(uevent, argv, argc); + default: + break; + } + + return -EINVAL; +} + +static int +nvkm_uevent_fini(struct nvkm_object *object, bool suspend) +{ + struct nvkm_uevent *uevent = nvkm_uevent(object); + + nvkm_event_ntfy_block(&uevent->ntfy); + return 0; +} + +static int +nvkm_uevent_init(struct nvkm_object *object) +{ + struct nvkm_uevent *uevent = nvkm_uevent(object); + + if (atomic_read(&uevent->allowed)) + nvkm_event_ntfy_allow(&uevent->ntfy); + + return 0; +} + +static void * +nvkm_uevent_dtor(struct nvkm_object *object) +{ + struct nvkm_uevent *uevent = nvkm_uevent(object); + + nvkm_event_ntfy_del(&uevent->ntfy); + return uevent; +} + +static const struct nvkm_object_func +nvkm_uevent = { + .dtor = nvkm_uevent_dtor, + .init = nvkm_uevent_init, + .fini = nvkm_uevent_fini, + .mthd = nvkm_uevent_mthd, +}; + +static int 
+nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits) +{ + struct nvkm_uevent *uevent = container_of(ntfy, typeof(*uevent), ntfy); + struct nvkm_client *client = uevent->object.client; + + if (uevent->func) + return uevent->func(uevent->parent, uevent->object.token, bits); + + return client->event(uevent->object.token, NULL, 0); +} + +int +nvkm_uevent_add(struct nvkm_uevent *uevent, struct nvkm_event *event, int id, u32 bits, + nvkm_uevent_func func) +{ + if (WARN_ON(uevent->func)) + return -EBUSY; + + nvkm_event_ntfy_add(event, id, bits, uevent->wait, nvkm_uevent_ntfy, &uevent->ntfy); + uevent->func = func; + return 0; +} + +int +nvkm_uevent_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_object *parent = oclass->parent; + struct nvkm_uevent *uevent; + union nvif_event_args *args = argv; + + if (argc < sizeof(args->v0) || args->v0.version != 0) + return -ENOSYS; + + if (!(uevent = kzalloc(sizeof(*uevent), GFP_KERNEL))) + return -ENOMEM; + *pobject = &uevent->object; + + nvkm_object_ctor(&nvkm_uevent, oclass, &uevent->object); + uevent->parent = parent; + uevent->func = NULL; + uevent->wait = args->v0.wait; + uevent->ntfy.event = NULL; + return parent->func->uevent(parent, &args->v0.data, argc - sizeof(args->v0), uevent); +} |