Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_device.h')
-rw-r--r--	drivers/gpu/drm/panthor/panthor_device.h	89
1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 465d3ab1b79e..4fc7cf2aeed5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -230,6 +230,24 @@ struct panthor_file {
 	/** @ptdev: Device attached to this file. */
 	struct panthor_device *ptdev;
 
+	/** @user_mmio: User MMIO related fields. */
+	struct {
+		/**
+		 * @offset: Offset used for user MMIO mappings.
+		 *
+		 * This offset should not be used to check the type of mapping
+		 * except in panthor_mmap(). After that point, MMIO mapping
+		 * offsets have been adjusted to match
+		 * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
+		 * instead.
+		 * Make sure this rule is followed at all times, because
+		 * userspace is in control of the offset, and can change the
+		 * value behind our back. Otherwise it can lead to erroneous
+		 * branching happening in kernel space.
+		 */
+		u64 offset;
+	} user_mmio;
+
 	/** @vms: VM pool attached to this file. */
 	struct panthor_vm_pool *vms;
 
@@ -437,4 +455,75 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
 
 extern struct workqueue_struct *panthor_cleanup_wq;
 
+static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
+{
+	writel(data, ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
+{
+	return readl(ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+	return readl_relaxed(ptdev->iomem + reg);
+}
+
+static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
+{
+	gpu_write(ptdev, reg, lower_32_bits(data));
+	gpu_write(ptdev, reg + 4, upper_32_bits(data));
+}
+
+static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
+{
+	return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+	return (gpu_read_relaxed(ptdev, reg) |
+		((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
+{
+	u32 lo, hi1, hi2;
+	do {
+		hi1 = gpu_read(ptdev, reg + 4);
+		lo = gpu_read(ptdev, reg);
+		hi2 = gpu_read(ptdev, reg + 4);
+	} while (hi1 != hi2);
+	return lo | ((u64)hi2 << 32);
+}
+
+#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+	read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
+			  dev, reg)
+
+#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+				     timeout_us) \
+	read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
+				 false, dev, reg)
+
+#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+	read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
+			  dev, reg)
+
+#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+				       timeout_us) \
+	read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
+				 false, dev, reg)
+
+#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+					     timeout_us) \
+	read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
+				 timeout_us, false, dev, reg)
+
+#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
+					timeout_us) \
+	read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
+			  false, dev, reg)
+
 #endif
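
Note that gpu_read64_counter() re-reads the high word on both sides of the low-word read and retries if it changed, so a carry propagating between the two 32-bit halves cannot produce a torn value. As a rough usage sketch for the poll helpers (not part of this patch; STATUS_REG and STATUS_DONE are hypothetical names, not real Panthor registers):

/* Hypothetical caller: sleep-poll a 64-bit register until a done bit is
 * set, or fail with -ETIMEDOUT after 100 ms.  The macro assigns the last
 * value read to 'status' on every iteration.
 */
static int panthor_wait_done(struct panthor_device *ptdev)
{
	u64 status;

	return gpu_read64_poll_timeout(ptdev, STATUS_REG, status,
				       status & STATUS_DONE,
				       10 /* delay_us */,
				       100000 /* timeout_us */);
}

The _atomic variants busy-wait with udelay() for contexts that cannot sleep, and the _relaxed variants skip the read barrier when ordering against other memory accesses is not required.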