path: root/drivers/gpu/drm/nouveau/nvkm/core/intr.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 11:59:58 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 11:59:58 -0800
commit	a594533df0f6ca391da003f43d53b336a2d23ffa (patch)
tree	ec984c693b0bfc208519c43134f21365797f90ee /drivers/gpu/drm/nouveau/nvkm/core/intr.c
parent	cdb9d3537711939e4d8fd0de2889c966f88346eb (diff)
parent	66efff515a6500d4b4976fbab3bee8b92a1137fb (diff)
Merge tag 'drm-next-2022-12-13' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "The biggest highlight is that the accel subsystem framework is merged.
  Hopefully for 6.3 we will be able to line up a driver to use it.

  In drivers land, i915 enables DG2 support by default now, and nouveau
  has a big stability refactoring and initial ampere support, AMD
  includes new hw IP support and should build on ARM again. There is
  also an ofdrm driver to take over offb on platforms where it's used.

  Stuff outside my tree, the dma-buf patches hit a few places, the vc4
  firmware changes also do, and i915 has some interactions with MEI for
  discrete GPUs. I think all of those should have been acked/reviewed
  by relevant parties.

  New driver:
   - ofdrm - replacement for offb

  fbdev:
   - add support for nomodeset

  fourcc:
   - add Vivante tiled modifier

  core:
   - atomic-helpers: CRTC primary plane test fixes, fb access hooks
   - connector: TV API consistency, cmdline parser improvements
   - send connector hotplug on cleanup
   - sort makefile objects

  tests:
   - sort kunit tests
   - improve DP-MST tests
   - add kunit helpers to create a device

  sched:
   - module param for scheduling policy
   - refcounting fix

  buddy:
   - add back random seed log

  ttm:
   - convert ttm_resource to size_t
   - optimize pool allocations

  edid:
   - HFVSDB parsing support fixes
   - logging/debug improvements
   - DSC quirks

  dma-buf:
   - Add unlocked vmap and attachment mapping
   - move drivers to common locking convention
   - locking improvements

  firmware:
   - new API for rPI firmware and vc4

  xilinx:
   - zynqmp: displayport bridge support
   - dpsub fix

  bridge:
   - adv7533: Remove dynamic lane switching
   - it6505: Runtime PM support, sync improvements
   - ps8640: Handle AUX defer messages
   - tc358775: Drop soft-reset over I2C

  panel:
   - panel-edp: Add INX N116BGE-EA2 C2 and C4 support
   - Jadard JD9365DA-H3
   - NewVision NV3051D

  amdgpu:
   - DCN support on ARM
   - DCN 2.1 secure display
   - Sienna Cichlid mode2 reset fixes
   - new GC 11.x firmware versions
   - drop AMD specific DSC workarounds in favour of drm code
   - clang warning fixes
   - scheduler rework
   - SR-IOV fixes
   - GPUVM locking fixes
   - fix memory leak in CS IOCTL error path
   - flexible array updates
   - enable new GC/PSP/SMU/NBIO IP
   - GFX preemption support for gfx9

  amdkfd:
   - cache size fixes
   - userptr fixes
   - enable cooperative launch on gfx 10.3
   - enable GC 11.0.4 KFD support

  radeon:
   - replace kmap with kmap_local_page
   - ACPI ref count fix
   - HDA audio notifier support

  i915:
   - DG2 enabled by default
   - MTL enablement work
   - hotplug refactoring
   - VBT improvements
   - Display and watermark refactoring
   - ADL-P workaround
   - temp disable runtime_pm for discrete
   - fix for A380 as a secondary GPU
   - Wa_18017747507 for DG2
   - CS timestamp support fixes for gen5 and earlier
   - never purge busy TTM objects
   - use i915_sg_dma_sizes for all backends
   - demote GuC kernel contexts to normal priority
   - gvt: refactor for new MDEV interface
   - enable DC power states on eDP ports
   - fix gen 2/3 workarounds

  nouveau:
   - fix page fault handling
   - Ampere acceleration support
   - driver stability improvements
   - nva3 backlight support

  msm:
   - MSM_INFO_GET_FLAGS support
   - DPU: XR30 and P010 image formats
   - Qualcomm SM6115 support
   - DSI PHY support for QCM2290
   - HDMI: refactored dev init path
   - remove exclusive-fence hack
   - fix speed-bin detection
   - enable clamp to idle on 7c3
   - improved hangcheck detection

  vmwgfx:
   - fb and cursor refactoring
   - convert to generic hashtable
   - cursor improvements

  etnaviv:
   - hw workarounds
   - softpin MMU fixes

  ast:
   - atomic gamma LUT support
   - convert to SHMEM

  lcdif:
   - support YUV planes
   - Increase DMA burst size
   - FIFO threshold tuning

  meson:
   - fix return type of cvbs mode_valid

  mgag200:
   - fix PLL setup on some revisions

  sun4i:
   - A100 and D1 support

  udl:
   - modesetting improvements
   - hot unplug support

  vc4:
   - support PAL-M
   - fix regression preventing 4K @ 60Hz
   - fix NULL ptr deref

  v3d:
   - switch to drm managed resources

  renesas:
   - RZ/G2L DSI support
   - DU Kconfig cleanup

  mediatek:
   - fixup dpi and hdmi
   - MT8188 dpi support
   - MT8195 AFBC support

  tegra:
   - NVDEC hardware on Tegra234 SoC

  hdlcd:
   - switch to drm managed resources

  ingenic:
   - fix registration error path

  hisilicon:
   - convert to drm_mode_init

  malidp:
   - use managed resources

  mtk:
   - use drm_mode_init

  rockchip:
   - use drm_mode_copy"

* tag 'drm-next-2022-12-13' of git://anongit.freedesktop.org/drm/drm: (1397 commits)
  drm/amdgpu: fix mmhub register base coding error
  drm/amdgpu: add tmz support for GC IP v11.0.4
  drm/amdgpu: enable GFX Clock Gating control for GC IP v11.0.4
  drm/amdgpu: enable GFX Power Gating for GC IP v11.0.4
  drm/amdgpu: enable GFX IP v11.0.4 CG support
  drm/amdgpu: Make amdgpu_ring_mux functions as static
  drm/amdgpu: generally allow over-commit during BO allocation
  drm/amd/display: fix array index out of bound error in DCN32 DML
  drm/amd/display: 3.2.215
  drm/amd/display: set optimized required for comp buf changes
  drm/amd/display: Add debug option to skip PSR CRTC disable
  drm/amd/display: correct DML calc error of UrgentLatency
  drm/amd/display: correct static_screen_event_mask
  drm/amd/display: Ensure commit_streams returns the DC return code
  drm/amd/display: read invalid ddc pin status cause engine busy
  drm/amd/display: Bypass DET swath fill check for max clocks
  drm/amd/display: Disable uclk pstate for subvp pipes
  drm/amd/display: Fix DCN2.1 default DSC clocks
  drm/amd/display: Enable dp_hdmi21_pcon support
  drm/amd/display: prevent seamless boot on displays that don't have the preferred dig
  ...
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/core/intr.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/core/intr.c	442
1 file changed, 442 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/intr.c b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
new file mode 100644
index 000000000000..e20b7ca218c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/intr.h>
+#include <core/device.h>
+#include <core/subdev.h>
+#include <subdev/pci.h>
+#include <subdev/top.h>
+
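+/* Translate an interrupt 'type' for 'subdev' into the (leaf, mask) pair that
+ * identifies its bit(s) within one of this controller's 32-bit status leaves.
+ * NVKM_INTR_SUBDEV is looked up in the controller's data table (consulting
+ * the TOP device list for per-engine vectors); explicit vectors map directly.
+ */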
+static int
+nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
+	       enum nvkm_intr_type type, int *leaf, u32 *mask)
+{
+	struct nvkm_device *device = subdev->device;
+
+	if (type < NVKM_INTR_VECTOR_0) {
+		if (type == NVKM_INTR_SUBDEV) {
+			const struct nvkm_intr_data *data = intr->data;
+			struct nvkm_top_device *tdev;
+
+			while (data && data->mask) {
+				if (data->type == NVKM_SUBDEV_TOP) {
+					list_for_each_entry(tdev, &device->top->device, head) {
+						if (tdev->intr >= 0 &&
+						    tdev->type == subdev->type &&
+						    tdev->inst == subdev->inst) {
+							if (data->mask & BIT(tdev->intr)) {
+								*leaf = data->leaf;
+								*mask = BIT(tdev->intr);
+								return 0;
+							}
+						}
+					}
+				} else
+				if (data->type == subdev->type && data->inst == subdev->inst) {
+					*leaf = data->leaf;
+					*mask = data->mask;
+					return 0;
+				}
+
+				data++;
+			}
+		} else {
+			return -ENOSYS;
+		}
+	} else {
+		if (type < intr->leaves * sizeof(*intr->stat) * 8) {
+			*leaf = type / 32;
+			*mask = BIT(type % 32);
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
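+/* Locate the controller (and leaf/mask) that services 'type' for 'subdev'. */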
+static struct nvkm_intr *
+nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
+{
+	struct nvkm_intr *intr;
+	int ret;
+
+	list_for_each_entry(intr, &subdev->device->intr.intr, head) {
+		ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
+		if (ret == 0)
+			return intr;
+	}
+
+	return NULL;
+}
+
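+/* Unmask bits in a leaf: clear any stale pending state first (where the HW
+ * supports it), then enable delivery.  Caller holds device->intr.lock.
+ */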
+static void
+nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
+{
+	intr->mask[leaf] |= mask;
+	if (intr->func->allow) {
+		if (intr->func->reset)
+			intr->func->reset(intr, leaf, mask);
+		intr->func->allow(intr, leaf, mask);
+	}
+}
+
+void
+nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
+{
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_intr *intr;
+	unsigned long flags;
+	int leaf;
+	u32 mask;
+
+	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
+	if (intr) {
+		nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
+		spin_lock_irqsave(&device->intr.lock, flags);
+		nvkm_intr_allow_locked(intr, leaf, mask);
+		spin_unlock_irqrestore(&device->intr.lock, flags);
+	}
+}
+
+static void
+nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
+{
+	intr->mask[leaf] &= ~mask;
+	if (intr->func->block)
+		intr->func->block(intr, leaf, mask);
+}
+
+void
+nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
+{
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_intr *intr;
+	unsigned long flags;
+	int leaf;
+	u32 mask;
+
+	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
+	if (intr) {
+		nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
+		spin_lock_irqsave(&device->intr.lock, flags);
+		nvkm_intr_block_locked(intr, leaf, mask);
+		spin_unlock_irqrestore(&device->intr.lock, flags);
+	}
+}
+
+static void
+nvkm_intr_rearm_locked(struct nvkm_device *device)
+{
+	struct nvkm_intr *intr;
+
+	list_for_each_entry(intr, &device->intr.intr, head)
+		intr->func->rearm(intr);
+}
+
+static void
+nvkm_intr_unarm_locked(struct nvkm_device *device)
+{
+	struct nvkm_intr *intr;
+
+	list_for_each_entry(intr, &device->intr.intr, head)
+		intr->func->unarm(intr);
+}
+
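+/* Top-level IRQ handler.  With top-level sources disarmed: latch each
+ * controller's pending leaves, dispatch registered handlers in priority
+ * order, block anything left unhandled (IRQ storm protection), and re-arm.
+ */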
+static irqreturn_t
+nvkm_intr(int irq, void *arg)
+{
+	struct nvkm_device *device = arg;
+	struct nvkm_intr *intr;
+	struct nvkm_inth *inth;
+	irqreturn_t ret = IRQ_NONE;
+	bool pending = false;
+	int prio, leaf;
+
+	/* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
+	spin_lock(&device->intr.lock);
+	if (!device->intr.armed)
+		goto done_unlock;
+
+	nvkm_intr_unarm_locked(device);
+	nvkm_pci_msi_rearm(device);
+
+	/* Fetch pending interrupt masks. */
+	list_for_each_entry(intr, &device->intr.intr, head) {
+		if (intr->func->pending(intr))
+			pending = true;
+	}
+
+	if (!pending)
+		goto done;
+
+	/* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
+	if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
+		goto done;
+
+	/* Execute handlers. */
+	for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
+		list_for_each_entry(inth, &device->intr.prio[prio], head) {
+			struct nvkm_intr *intr = inth->intr;
+
+			if (intr->stat[inth->leaf] & inth->mask) {
+				if (atomic_read(&inth->allowed)) {
+					if (intr->func->reset)
+						intr->func->reset(intr, inth->leaf, inth->mask);
+					if (inth->func(inth) == IRQ_HANDLED)
+						ret = IRQ_HANDLED;
+				}
+			}
+		}
+	}
+
+	/* Nothing handled? Some debugging/protection from IRQ storms is in order... */
+	if (ret == IRQ_NONE) {
+		list_for_each_entry(intr, &device->intr.intr, head) {
+			for (leaf = 0; leaf < intr->leaves; leaf++) {
+				if (intr->stat[leaf]) {
+					nvkm_warn(intr->subdev, "intr%d: %08x\n",
+						  leaf, intr->stat[leaf]);
+					nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
+				}
+			}
+		}
+	}
+
+done:
+	/* Re-enable all top-level interrupt sources. */
+	nvkm_intr_rearm_locked(device);
+done_unlock:
+	spin_unlock(&device->intr.lock);
+	return ret;
+}
+
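+/* Register an interrupt controller, allocating its per-leaf status/mask
+ * arrays.  At NV_DBG_DEBUG and above the SW masks default to ~0, so every
+ * leaf bit is allowed when the controller is re-armed.
+ */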
+int
+nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
+	      struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
+{
+	struct nvkm_device *device = subdev->device;
+	int i;
+
+	intr->func = func;
+	intr->data = data;
+	intr->subdev = subdev;
+	intr->leaves = leaves;
+	intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
+	intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
+	if (!intr->stat || !intr->mask) {
+		kfree(intr->stat);
+		kfree(intr->mask);
+		return -ENOMEM;
+	}
+
+	if (intr->subdev->debug >= NV_DBG_DEBUG) {
+		for (i = 0; i < intr->leaves; i++)
+			intr->mask[i] = ~0;
+	}
+
+	spin_lock_irq(&device->intr.lock);
+	list_add_tail(&intr->head, &device->intr.intr);
+	spin_unlock_irq(&device->intr.lock);
+	return 0;
+}
+
+static irqreturn_t
+nvkm_intr_subdev(struct nvkm_inth *inth)
+{
+	struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);
+
+	nvkm_subdev_intr(subdev);
+	return IRQ_HANDLED;
+}
+
+static void
+nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
+{
+	struct nvkm_subdev *subdev;
+	enum nvkm_intr_prio prio;
+	int ret;
+
+	subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
+	if (!subdev || !subdev->func->intr)
+		return;
+
+	if (type == NVKM_ENGINE_DISP)
+		prio = NVKM_INTR_PRIO_VBLANK;
+	else
+		prio = NVKM_INTR_PRIO_NORMAL;
+
+	ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
+	if (WARN_ON(ret))
+		return;
+
+	nvkm_inth_allow(&subdev->inth);
+}
+
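+/* Walk a controller's data table and hook up nvkm_subdev_intr() for every
+ * subdev still using the legacy interrupt path.
+ */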
+static void
+nvkm_intr_subdev_add(struct nvkm_intr *intr)
+{
+	const struct nvkm_intr_data *data;
+	struct nvkm_device *device = intr->subdev->device;
+	struct nvkm_top_device *tdev;
+
+	for (data = intr->data; data && data->mask; data++) {
+		if (data->legacy) {
+			if (data->type == NVKM_SUBDEV_TOP) {
+				list_for_each_entry(tdev, &device->top->device, head) {
+					if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
+						continue;
+
+					nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
+				}
+			} else {
+				nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
+			}
+		}
+	}
+}
+
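+/* Resync each controller's HW enables with the SW masks (block everything,
+ * then allow what's wanted) and re-enable top-level interrupt sources.
+ * Legacy per-subdev handlers are registered on first use.
+ */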
+void
+nvkm_intr_rearm(struct nvkm_device *device)
+{
+	struct nvkm_intr *intr;
+	int i;
+
+	if (unlikely(!device->intr.legacy_done)) {
+		list_for_each_entry(intr, &device->intr.intr, head)
+			nvkm_intr_subdev_add(intr);
+		device->intr.legacy_done = true;
+	}
+
+	spin_lock_irq(&device->intr.lock);
+	list_for_each_entry(intr, &device->intr.intr, head) {
+		for (i = 0; intr->func->block && i < intr->leaves; i++) {
+			intr->func->block(intr, i, ~0);
+			intr->func->allow(intr, i, intr->mask[i]);
+		}
+	}
+
+	nvkm_intr_rearm_locked(device);
+	device->intr.armed = true;
+	spin_unlock_irq(&device->intr.lock);
+}
+
+void
+nvkm_intr_unarm(struct nvkm_device *device)
+{
+	spin_lock_irq(&device->intr.lock);
+	nvkm_intr_unarm_locked(device);
+	device->intr.armed = false;
+	spin_unlock_irq(&device->intr.lock);
+}
+
+int
+nvkm_intr_install(struct nvkm_device *device)
+{
+	int ret;
+
+	device->intr.irq = device->func->irq(device);
+	if (device->intr.irq < 0)
+		return device->intr.irq;
+
+	ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
+	if (ret)
+		return ret;
+
+	device->intr.alloc = true;
+	return 0;
+}
+
+void
+nvkm_intr_dtor(struct nvkm_device *device)
+{
+	struct nvkm_intr *intr, *intt;
+
+	list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
+		list_del(&intr->head);
+		kfree(intr->mask);
+		kfree(intr->stat);
+	}
+
+	if (device->intr.alloc)
+		free_irq(device->intr.irq, device);
+}
+
+void
+nvkm_intr_ctor(struct nvkm_device *device)
+{
+	int i;
+
+	INIT_LIST_HEAD(&device->intr.intr);
+	for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
+		INIT_LIST_HEAD(&device->intr.prio[i]);
+
+	spin_lock_init(&device->intr.lock);
+	device->intr.armed = false;
+}
+
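+/* Stop a handler from being called.  Only the handler's 'allowed' flag is
+ * cleared; the HW leaf bits stay enabled, as they may be shared with other
+ * handlers.
+ */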
+void
+nvkm_inth_block(struct nvkm_inth *inth)
+{
+	if (unlikely(!inth->intr))
+		return;
+
+	atomic_set(&inth->allowed, 0);
+}
+
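+/* Mark a handler as runnable, unmasking its leaf bits in HW if they are not
+ * already enabled on behalf of another handler.
+ */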
+void
+nvkm_inth_allow(struct nvkm_inth *inth)
+{
+	struct nvkm_intr *intr = inth->intr;
+	unsigned long flags;
+
+	if (unlikely(!inth->intr))
+		return;
+
+	spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
+	if (!atomic_xchg(&inth->allowed, 1)) {
+		if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
+			nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
+	}
+	spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
+}
+
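+/* Register a handler ('inth') for interrupt 'type' on controller 'intr' at
+ * the given priority.  The handler starts blocked; call nvkm_inth_allow()
+ * to enable it.
+ */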
+int
+nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
+	      struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
+{
+	struct nvkm_device *device = subdev->device;
+	int ret;
+
+	if (WARN_ON(inth->mask))
+		return -EBUSY;
+
+	ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
+	if (ret)
+		return ret;
+
+	nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
+		   inth->leaf, inth->mask, subdev->name);
+
+	inth->intr = intr;
+	inth->func = func;
+	atomic_set(&inth->allowed, 0);
+	list_add_tail(&inth->head, &device->intr.prio[prio]);
+	return 0;
+}