Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 1029 |
1 file changed, 790 insertions, 239 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 536005bff24a..62d43b8cbe58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -27,7 +27,6 @@ #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/pm_runtime.h> -#include <linux/poll.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -36,87 +35,13 @@ #include "amdgpu_rap.h" #include "amdgpu_securedisplay.h" #include "amdgpu_fw_attestation.h" +#include "amdgpu_umr.h" -int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned long timeout = 600 * HZ; - int ret; - - wake_up_interruptible(&adev->autodump.gpu_hang); - - ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout); - if (ret == 0) { - pr_err("autodump: timeout, move on to gpu recovery\n"); - return -ETIMEDOUT; - } -#endif - return 0; -} +#include "amdgpu_reset.h" +#include "amdgpu_psp_ta.h" #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file) -{ - struct amdgpu_device *adev = inode->i_private; - int ret; - - file->private_data = adev; - - ret = down_read_killable(&adev->reset_sem); - if (ret) - return ret; - - if (adev->autodump.dumping.done) { - reinit_completion(&adev->autodump.dumping); - ret = 0; - } else { - ret = -EBUSY; - } - - up_read(&adev->reset_sem); - - return ret; -} - -static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file) -{ - struct amdgpu_device *adev = file->private_data; - - complete_all(&adev->autodump.dumping); - return 0; -} - -static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table) -{ - struct amdgpu_device *adev = file->private_data; - - poll_wait(file, &adev->autodump.gpu_hang, poll_table); - - if (amdgpu_in_reset(adev)) - return POLLIN | POLLRDNORM | POLLWRNORM; - - return 0; -} - -static const struct file_operations autodump_debug_fops = { - .owner = THIS_MODULE, - .open = amdgpu_debugfs_autodump_open, - .poll = amdgpu_debugfs_autodump_poll, - .release = amdgpu_debugfs_autodump_release, -}; - -static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev) -{ - init_completion(&adev->autodump.dumping); - complete_all(&adev->autodump.dumping); - init_waitqueue_head(&adev->autodump.gpu_hang); - - debugfs_create_file("amdgpu_autodump", 0600, - adev_to_drm(adev)->primary->debugfs_root, - adev, &autodump_debug_fops); -} - /** * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes * @@ -131,14 +56,14 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev) * * Bit 62: Indicates a GRBM bank switch is needed * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is - * zero) + * zero) * Bits 24..33: The SE or ME selector if needed * Bits 34..43: The SH (or SA) or PIPE selector if needed * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed * * Bit 23: Indicates that the PM power gating lock should be held - * This is necessary to read registers that might be - * unreliable during a power gating transistion. + * This is necessary to read registers that might be + * unreliable during a power gating transistion. * * The lower bits are the BYTE offset of the register to read. 
This * allows reading multiple registers in a single call and having @@ -151,7 +76,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, ssize_t result = 0; int r; bool pm_pg_lock, use_bank, use_ring; - unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; + unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; pm_pg_lock = use_bank = use_ring = false; instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0; @@ -204,17 +129,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); + sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); - amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); + amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0); } if (pm_pg_lock) @@ -229,7 +153,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } else { r = get_user(value, (uint32_t *)buf); if (!r) - amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0); } if (r) { result = r; @@ -244,17 +168,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, end: if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -279,6 +202,301 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); } +static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd; + + rd = kzalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd = file->private_data; + + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t value; + + if (size & 0x3 || offset & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + mutex_lock(&rd->lock); + + if (rd->id.use_grbm) { + if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= 
adev->gfx.config.max_sh_per_se) || + (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + mutex_unlock(&rd->lock); + return -EINVAL; + } + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, + rd->id.grbm.sh, + rd->id.grbm.instance, rd->id.xcc_id); + } + + if (rd->id.use_srbm) { + mutex_lock(&adev->srbm_mutex); + amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, + rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id); + } + + if (rd->id.pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + if (!write_en) { + value = RREG32(offset >> 2); + r = put_user(value, (uint32_t *)buf); + } else { + r = get_user(value, (uint32_t *)buf); + if (!r) + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id); + } + if (r) { + result = r; + goto end; + } + offset += 4; + size -= 4; + result += 4; + buf += 4; + } +end: + if (rd->id.use_grbm) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (rd->id.use_srbm) { + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id); + mutex_unlock(&adev->srbm_mutex); + } + + if (rd->id.pg_lock) + mutex_unlock(&adev->pm.mutex); + + mutex_unlock(&rd->lock); + + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); + return result; +} + +static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_debugfs_regs2_iocdata v1_data; + int r; + + mutex_lock(&rd->lock); + + switch (cmd) { + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2: + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data, + sizeof(rd->id)); + if (r) + r = -EINVAL; + goto done; + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data, + sizeof(v1_data)); + if (r) { + r = -EINVAL; + goto done; + } + goto v1_copy; + default: + r = -EINVAL; + goto done; + } + +v1_copy: + rd->id.use_srbm = v1_data.use_srbm; + rd->id.use_grbm = v1_data.use_grbm; + rd->id.pg_lock = v1_data.pg_lock; + rd->id.grbm.se = v1_data.grbm.se; + rd->id.grbm.sh = v1_data.grbm.sh; + rd->id.grbm.instance = v1_data.grbm.instance; + rd->id.srbm.me = v1_data.srbm.me; + rd->id.srbm.pipe = v1_data.srbm.pipe; + rd->id.srbm.queue = v1_data.srbm.queue; + rd->id.xcc_id = 0; +done: + mutex_unlock(&rd->lock); + return r; +} + +static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); +} + +static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); +} + +static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd; + + rd = kzalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd = file->private_data; + + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t 
amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t *data, x; + + if (size > 4096 || size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); + if (!data) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + return -ENOMEM; + } + + /* switch to the specific se/sh/cu */ + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id); + + if (!rd->id.gpr_or_wave) { + x = 0; + if (adev->gfx.funcs->read_wave_data) + adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x); + } else { + x = size >> 2; + if (rd->id.gpr.vpgr_or_sgpr) { + if (adev->gfx.funcs->read_wave_vgprs) + adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data); + } else { + if (adev->gfx.funcs->read_wave_sgprs) + adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data); + } + } + + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); + + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (!x) { + result = -EINVAL; + goto done; + } + + while (size && (*pos < x * 4)) { + uint32_t value; + + value = data[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + result = r; + goto done; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + +done: + amdgpu_virt_disable_access_debugfs(adev); + kfree(data); + return result; +} + +static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + int r = 0; + + mutex_lock(&rd->lock); + + switch (cmd) { + case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: + if (copy_from_user(&rd->id, + (struct amdgpu_debugfs_gprwave_iocdata *)data, + sizeof(rd->id))) + r = -EFAULT; + goto done; + default: + r = -EINVAL; + goto done; + } + +done: + mutex_unlock(&rd->lock); + return r; +} + + + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -317,14 +535,14 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = RREG32_PCIE(*pos); + if (upper_32_bits(*pos)) + value = RREG32_PCIE_EXT(*pos); + else + value = RREG32_PCIE(*pos); + r = put_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; result += 4; buf += 4; @@ -332,11 +550,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -377,14 +595,13 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user uint32_t value; r = 
get_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; - WREG32_PCIE(*pos, value); + if (upper_32_bits(*pos)) + WREG32_PCIE_EXT(*pos, value); + else + WREG32_PCIE(*pos, value); result += 4; buf += 4; @@ -392,11 +609,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -421,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (!adev->didt_rreg) + return -EOPNOTSUPP; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -438,12 +658,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, value = RREG32_DIDT(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; result += 4; buf += 4; @@ -451,11 +667,11 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -480,6 +696,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (!adev->didt_wreg) + return -EOPNOTSUPP; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -496,12 +715,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; WREG32_DIDT(*pos >> 2, value); @@ -511,11 +726,11 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -537,6 +752,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + if (!adev->smc_rreg) + return -EOPNOTSUPP; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -557,12 +775,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, value = RREG32_SMC(*pos); r = put_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; result += 4; buf += 4; @@ -570,11 +784,11 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: 
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -596,6 +810,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * ssize_t result = 0; int r; + if (!adev->smc_wreg) + return -EOPNOTSUPP; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -615,12 +832,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return r; - } + if (r) + goto out; WREG32_SMC(*pos, value); @@ -630,11 +843,11 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - amdgpu_virt_disable_access_debugfs(adev); - return result; + return r; } /** @@ -667,7 +880,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 3; + config[no_regs++] = 5; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = adev->gfx.config.max_cu_per_sh; @@ -695,7 +908,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, /* rev==1 */ config[no_regs++] = adev->rev_id; config[no_regs++] = adev->pg_flags; - config[no_regs++] = adev->cg_flags; + config[no_regs++] = lower_32_bits(adev->cg_flags); /* rev==2 */ config[no_regs++] = adev->family; @@ -707,6 +920,13 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->pdev->subsystem_device; config[no_regs++] = adev->pdev->subsystem_vendor; + /* rev==4 APU flag */ + config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0; + + /* rev==5 PG/CG flag upper 32bit */ + config[no_regs++] = 0; + config[no_regs++] = upper_32_bits(adev->cg_flags); + while (size && (*pos < no_regs * 4)) { uint32_t value; @@ -772,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) { @@ -810,7 +1029,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, * The offset being sought changes which wave that the status data * will be returned for. 
The bits are used as follows: * - * Bits 0..6: Byte offset into data + * Bits 0..6: Byte offset into data * Bits 7..14: SE selector * Bits 15..22: SH/SA selector * Bits 23..30: CU/{WGP+SIMD} selector @@ -854,16 +1073,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); x = 0; if (adev->gfx.funcs->read_wave_data) - adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); + adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x); - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -948,20 +1166,19 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) - adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); + adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data); } else { if (adev->gfx.funcs->read_wave_sgprs) - adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); + adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data); } - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); while (size) { @@ -990,6 +1207,154 @@ err: } /** + * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * Read the last residency value logged. It doesn't auto update, one needs to + * stop logging before getting the current value. 
+ */ +static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + while (size) { + uint32_t value; + + r = amdgpu_get_gfx_off_residency(adev, &value); + if (r) + goto out; + + r = put_user(value, (uint32_t *)buf); + if (r) + goto out; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + r = result; +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + +/** + * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency + * + * @f: open file handle + * @buf: User buffer to write data from + * @size: Number of bytes to write + * @pos: Offset to seek to + * + * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop + */ +static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + while (size) { + u32 value; + + r = get_user(value, (uint32_t *)buf); + if (r) + goto out; + + amdgpu_set_gfx_off_residency(adev, value ? true : false); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + r = result; +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + + +/** + * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + */ +static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + while (size) { + u64 value = 0; + + r = amdgpu_get_gfx_off_entrycount(adev, &value); + if (r) + goto out; + + r = put_user(value, (u64 *)buf); + if (r) + goto out; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + r = result; +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + +/** * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF * * @f: open file handle @@ -1019,11 +1384,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + if (r) + goto out; amdgpu_gfx_off_ctrl(adev, value ? 
true : false); @@ -1033,10 +1395,11 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return result; + return r; } @@ -1059,25 +1422,57 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; + } while (size) { - uint32_t value; + u32 value = adev->gfx.gfx_off_state; + + r = put_user(value, (u32 *)buf); + if (r) + goto out; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + r = result; +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + +static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + while (size) { + u32 value; r = amdgpu_get_gfx_off_status(adev, &value); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + if (r) + goto out; - r = put_user(value, (uint32_t *)buf); - if (r) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = put_user(value, (u32 *)buf); + if (r) + goto out; result += 4; buf += 4; @@ -1085,12 +1480,32 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + r = result; +out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return result; + return r; } +static const struct file_operations amdgpu_debugfs_regs2_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, + .read = amdgpu_debugfs_regs2_read, + .write = amdgpu_debugfs_regs2_write, + .open = amdgpu_debugfs_regs2_open, + .release = amdgpu_debugfs_regs2_release, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_gprwave_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl, + .read = amdgpu_debugfs_gprwave_read, + .open = amdgpu_debugfs_gprwave_open, + .release = amdgpu_debugfs_gprwave_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -1146,8 +1561,29 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gfxoff_status_read, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gfxoff_count_read, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gfxoff_residency_read, + .write = amdgpu_debugfs_gfxoff_residency_write, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs2_fops, + 
&amdgpu_debugfs_gprwave_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -1156,10 +1592,15 @@ static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_wave_fops, &amdgpu_debugfs_gpr_fops, &amdgpu_debugfs_gfxoff_fops, + &amdgpu_debugfs_gfxoff_status_fops, + &amdgpu_debugfs_gfxoff_count_fops, + &amdgpu_debugfs_gfxoff_residency_fops, }; -static const char *debugfs_regs_names[] = { +static const char * const debugfs_regs_names[] = { "amdgpu_regs", + "amdgpu_regs2", + "amdgpu_gprwave", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", @@ -1168,11 +1609,14 @@ static const char *debugfs_regs_names[] = { "amdgpu_wave", "amdgpu_gpr", "amdgpu_gfxoff", + "amdgpu_gfxoff_status", + "amdgpu_gfxoff_count", + "amdgpu_gfxoff_residency", }; /** * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide - * register access. + * register access. * * @adev: The device to attach the debugfs entries to */ @@ -1184,7 +1628,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | S_IRUGO, root, + S_IFREG | 0400, root, adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); @@ -1195,7 +1639,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; struct drm_device *dev = adev_to_drm(adev); int r = 0, i; @@ -1206,7 +1650,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) } /* Avoid accidently unparking the sched thread during GPU reset */ - r = down_read_killable(&adev->reset_sem); + r = down_write_killable(&adev->reset_domain->sem); if (r) return r; @@ -1214,30 +1658,29 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; - kthread_park(ring->sched.thread); + drm_sched_wqueue_stop(&ring->sched); } - seq_printf(m, "run ib test:\n"); + seq_puts(m, "run ib test:\n"); r = amdgpu_ib_ring_tests(adev); if (r) seq_printf(m, "ib ring tests failed (%d).\n", r); else - seq_printf(m, "ib ring tests passed.\n"); + seq_puts(m, "ib ring tests passed.\n"); /* go on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); } - up_read(&adev->reset_sem); + up_write(&adev->reset_domain->sem); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1255,9 +1698,8 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) return r; } - *val = amdgpu_bo_evict_vram(adev); + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1268,28 +1710,43 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) { struct amdgpu_device *adev = (struct amdgpu_device *)data; struct drm_device *dev = adev_to_drm(adev); - struct ttm_resource_manager *man; int r; r = pm_runtime_get_sync(dev->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + 
pm_runtime_put_autosuspend(dev->dev); return r; } - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; } +static int amdgpu_debugfs_benchmark(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + struct drm_device *dev = adev_to_drm(adev); + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); + return r; + } + + r = amdgpu_benchmark(adev, val); + + pm_runtime_put_autosuspend(dev->dev); + + return r; +} static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; struct drm_device *dev = adev_to_drm(adev); struct drm_file *file; int r; @@ -1301,9 +1758,14 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) list_for_each_entry(file, &dev->filelist, lhead) { struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; + struct amdgpu_task_info *ti; + + ti = amdgpu_vm_get_task_info_vm(vm); + if (ti) { + seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name); + amdgpu_vm_put_task_info(ti); + } - seq_printf(m, "pid:%d\tProcess:%s ----------\n", - vm->task_info.pid, vm->task_info.process_name); r = amdgpu_bo_reserve(vm->root.bo, true); if (r) break; @@ -1322,6 +1784,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram, NULL, "%lld\n"); DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt, NULL, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark, + "%lld\n"); static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring, struct dma_fence **fences) @@ -1414,7 +1878,7 @@ no_preempt: continue; } job = to_amdgpu_job(s_job); - if (preempted && job->fence == fence) + if (preempted && (&job->hw_fence->base) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } @@ -1423,7 +1887,7 @@ no_preempt: static int amdgpu_debugfs_ib_preempt(void *data, u64 val) { - int r, resched, length; + int r, length; struct amdgpu_ring *ring; struct dma_fence **fences = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)data; @@ -1433,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) ring = adev->rings[val]; - if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring) || + !ring->funcs->preempt_ib) return -EINVAL; /* the last preemption failed */ @@ -1446,14 +1911,12 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) return -ENOMEM; /* Avoid accidently unparking the sched thread during GPU reset */ - r = down_read_killable(&adev->reset_sem); + r = down_read_killable(&adev->reset_domain->sem); if (r) goto pro_end; /* stop the scheduler */ - kthread_park(ring->sched.thread); - - resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + drm_sched_wqueue_stop(&ring->sched); /* preempt the IB */ r = amdgpu_ring_preempt_ib(ring); @@ -1487,11 +1950,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) failure: /* restart the scheduler */ - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); - up_read(&adev->reset_sem); - - ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); + up_read(&adev->reset_domain->sem); 
pro_end: kfree(fences); @@ -1505,7 +1966,7 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) uint32_t max_freq, min_freq; struct amdgpu_device *adev = (struct amdgpu_device *)data; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return -EINVAL; ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); @@ -1514,22 +1975,24 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) return ret; } - if (is_support_sw_smu(adev)) { - ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq); - if (ret || val > max_freq || val < min_freq) - return -EINVAL; - ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val); - } else { - return 0; + ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq); + if (ret == -EOPNOTSUPP) { + ret = 0; + goto out; + } + if (ret || val > max_freq || val < min_freq) { + ret = -EINVAL; + goto out; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val); if (ret) - return -EINVAL; + ret = -EINVAL; - return 0; +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return ret; } DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, @@ -1544,20 +2007,24 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) struct dentry *ent; int r, i; + if (!debugfs_initialized()) + return 0; + debugfs_create_x32("amdgpu_smu_debug", 0600, root, + &adev->pm.smu_debug_mask); ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, &fops_sclk_set); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } /* Register debugfs entries for amdgpu_ttm */ @@ -1572,9 +2039,12 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) DRM_ERROR("registering register debugfs failed (%d).\n", r); amdgpu_debugfs_firmware_init(adev); + amdgpu_ta_if_debugfs_init(adev); + + amdgpu_debugfs_mes_event_log_init(adev); #if defined(CONFIG_DRM_AMD_DC) - if (amdgpu_device_has_dc_support(adev)) + if (adev->dc_enabled) dtn_debugfs_init(adev); #endif @@ -1584,34 +2054,112 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (!ring) continue; - if (amdgpu_debugfs_ring_init(adev, ring)) { - DRM_ERROR("Failed to register debugfs file for rings !\n"); - } + amdgpu_debugfs_ring_init(adev, ring); } + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (!amdgpu_vcnfw_log) + break; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]); + } + + if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog) + amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm); + + amdgpu_debugfs_vcn_sched_mask_init(adev); + amdgpu_debugfs_jpeg_sched_mask_init(adev); + amdgpu_debugfs_gfx_sched_mask_init(adev); + amdgpu_debugfs_compute_sched_mask_init(adev); + amdgpu_debugfs_sdma_sched_mask_init(adev); + amdgpu_ras_debugfs_create_all(adev); - amdgpu_debugfs_autodump_init(adev); amdgpu_rap_debugfs_init(adev); amdgpu_securedisplay_debugfs_init(adev); amdgpu_fw_attestation_debugfs_init(adev); + amdgpu_psp_debugfs_init(adev); - debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, + debugfs_create_file("amdgpu_evict_vram", 0400, 
root, adev, &amdgpu_evict_vram_fops); - debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, + debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev, &amdgpu_evict_gtt_fops); - debugfs_create_file("amdgpu_test_ib", 0444, root, adev, + debugfs_create_file("amdgpu_test_ib", 0400, root, adev, &amdgpu_debugfs_test_ib_fops); debugfs_create_file("amdgpu_vm_info", 0444, root, adev, &amdgpu_debugfs_vm_info_fops); + debugfs_create_file("amdgpu_benchmark", 0200, root, adev, + &amdgpu_benchmark_fops); adev->debugfs_vbios_blob.data = adev->bios; adev->debugfs_vbios_blob.size = adev->bios_size; debugfs_create_blob("amdgpu_vbios", 0444, root, &adev->debugfs_vbios_blob); + if (adev->discovery.debugfs_blob.size) + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->discovery.debugfs_blob); + return 0; } +static int amdgpu_pt_info_read(struct seq_file *m, void *unused) +{ + struct drm_file *file; + struct amdgpu_fpriv *fpriv; + struct amdgpu_bo *root_bo; + struct amdgpu_device *adev; + int r; + + file = m->private; + if (!file) + return -EINVAL; + + adev = drm_to_adev(file->minor->dev); + fpriv = file->driver_priv; + if (!fpriv || !fpriv->vm.root.bo) + return -ENODEV; + + root_bo = amdgpu_bo_ref(fpriv->vm.root.bo); + r = amdgpu_bo_reserve(root_bo, true); + if (r) { + amdgpu_bo_unref(&root_bo); + return -EINVAL; + } + + seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo)); + seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn); + seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level); + seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size); + seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size); + + amdgpu_bo_unreserve(root_bo); + amdgpu_bo_unref(&root_bo); + + return 0; +} + +static int amdgpu_pt_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, amdgpu_pt_info_read, inode->i_private); +} + +static const struct file_operations amdgpu_pt_info_fops = { + .owner = THIS_MODULE, + .open = amdgpu_pt_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void amdgpu_debugfs_vm_init(struct drm_file *file) +{ + debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file, + &amdgpu_pt_info_fops); +} + #else int amdgpu_debugfs_init(struct amdgpu_device *adev) { @@ -1621,4 +2169,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } +void amdgpu_debugfs_vm_init(struct drm_file *file) +{ +} #endif |
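The offset encoding documented in the amdgpu_debugfs_process_reg_op() comment above (GRBM/SRBM bank-switch request in bits 62/61, SE/SH/instance or ME/PIPE/QUEUE selectors in bits 24..53, PM power-gating lock in bit 23, register byte offset in the low bits) can be exercised from user space roughly as follows. This is a minimal sketch, not part of the patch; the DRI minor number and the register offset are placeholders, and the selector values chosen here simply pick bank 0.

/*
 * Minimal sketch of a banked read through the legacy amdgpu_regs node.
 * Bit positions follow the comment in amdgpu_debugfs_process_reg_op();
 * the debugfs path and REG_BYTE_OFFSET are placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define REG_BYTE_OFFSET 0x0UL		/* placeholder register byte offset */

int main(void)
{
	uint64_t off = REG_BYTE_OFFSET;
	uint32_t val;
	int fd;

	off |= 1ULL << 62;		/* request a GRBM bank switch           */
	off |= (uint64_t)0 << 24;	/* SE selector (bits 24..33)            */
	off |= (uint64_t)0 << 34;	/* SH/SA selector (bits 34..43)         */
	off |= (uint64_t)0 << 44;	/* INSTANCE/CU selector (bits 44..53)   */
	/* off |= 1ULL << 23;		   optionally hold the PM PG lock       */

	fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	if (fd < 0)
		return 1;

	if (pread(fd, &val, sizeof(val), off) == sizeof(val))
		printf("reg 0x%lx = 0x%08x\n", (unsigned long)REG_BYTE_OFFSET, val);

	close(fd);
	return 0;
}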
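The new amdgpu_regs2 node replaces that bit-packed offset scheme: bank state is programmed once through an ioctl (AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2, or the older SET_STATE without xcc_id), and reads/writes then use the plain register byte offset. The sketch below assumes the structure and ioctl definitions from amdgpu_umr.h, which this patch includes but whose contents are not shown in the diff; field names follow the v1-to-v2 copy in amdgpu_debugfs_regs2_ioctl(), and user-space consumers such as umr carry matching definitions.

/*
 * Sketch of driving amdgpu_regs2: program GRBM bank selection via
 * ioctl, then read registers at their plain byte offset.  Assumes the
 * iocdata layout and ioctl numbers from amdgpu_umr.h (not shown here).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "amdgpu_umr.h"		/* amdgpu_debugfs_regs2_iocdata_v2 etc. */

static int read_banked_reg(int fd, uint32_t byte_offset, uint32_t *val)
{
	struct amdgpu_debugfs_regs2_iocdata_v2 id;

	memset(&id, 0, sizeof(id));
	id.use_grbm = 1;		/* select a specific SE/SH/instance */
	id.grbm.se = 0;
	id.grbm.sh = 0;
	id.grbm.instance = 0;
	id.xcc_id = 0;			/* first XCC on multi-XCC parts */

	if (ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2, &id) < 0)
		return -1;
	if (pread(fd, val, sizeof(*val), byte_offset) != sizeof(*val))
		return -1;
	return 0;
}

int main(void)
{
	uint32_t val;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDONLY);

	if (fd < 0)
		return 1;
	if (!read_banked_reg(fd, 0x0 /* placeholder offset */, &val))
		printf("value: 0x%08x\n", val);
	close(fd);
	return 0;
}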
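amdgpu_gprwave follows the same pattern for wave state and GPR dumps: AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE selects SE/SH/CU, SIMD, wave and XCC, gpr_or_wave chooses between wave status and VGPR/SGPR data, and reads are 4-byte aligned and capped at 4096 bytes. The sketch below again assumes the amdgpu_debugfs_gprwave_iocdata layout and ioctl number from amdgpu_umr.h; all wave coordinates are placeholders.

/*
 * Sketch: dump wave status words via the new amdgpu_gprwave node.
 * Struct layout and ioctl number are assumed from amdgpu_umr.h.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "amdgpu_umr.h"

int main(void)
{
	struct amdgpu_debugfs_gprwave_iocdata id;
	uint32_t data[32];
	ssize_t n;
	int fd, i;

	fd = open("/sys/kernel/debug/dri/0/amdgpu_gprwave", O_RDONLY);
	if (fd < 0)
		return 1;

	memset(&id, 0, sizeof(id));
	id.se = 0;			/* placeholder wave coordinates */
	id.sh = 0;
	id.cu = 0;
	id.simd = 0;
	id.wave = 0;
	id.xcc_id = 0;
	id.gpr_or_wave = 0;		/* 0 = wave status, 1 = VGPRs/SGPRs */

	if (ioctl(fd, AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE, &id) < 0)
		return 1;

	/* reads must be 4-byte aligned and are capped at 4096 bytes */
	n = pread(fd, data, sizeof(data), 0);
	for (i = 0; i < n / 4; i++)
		printf("status[%d] = 0x%08x\n", i, data[i]);

	close(fd);
	return 0;
}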
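amdgpu_gca_config stays a flat array of 32-bit words whose first word is a layout version, bumped here from 3 to 5: revision 4 appends an APU flag and revision 5 appends a reserved word plus the upper 32 bits of the now 64-bit cg_flags (the revision 1 slot keeps the lower half). A parsing sketch, interpreting only the trailing fields whose positions are visible in this patch; the DRI minor is a placeholder.

/*
 * Sketch: parse the versioned amdgpu_gca_config blob.  Only the
 * trailing words added by this patch are interpreted.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t cfg[256];
	ssize_t n;
	int fd, words;

	fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
	if (fd < 0)
		return 1;
	n = pread(fd, cfg, sizeof(cfg), 0);
	close(fd);
	if (n < 4)
		return 1;

	words = n / 4;
	printf("layout version: %u (%d words)\n", cfg[0], words);
	if (cfg[0] == 5 && words >= 3) {
		/* last three words: APU flag (rev 4), reserved, cg_flags[63:32] (rev 5) */
		printf("APU: %u\n", cfg[words - 3]);
		printf("cg_flags[63:32]: 0x%08x\n", cfg[words - 1]);
	}
	return 0;
}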
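The new GFXOFF files speak the same raw binary protocol as the other register nodes: 32-bit words for amdgpu_gfxoff, amdgpu_gfxoff_status and amdgpu_gfxoff_residency (write a non-zero word to start residency logging, zero to stop, then read back the last logged value), and a 64-bit entry count from amdgpu_gfxoff_count. A minimal sketch for the residency and status files, with the DRI minor as a placeholder.

/*
 * Minimal sketch: sample GFXOFF residency and status.  Values are raw
 * 32-bit words, not ASCII; the debugfs path is a placeholder.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const char *dbgdir = "/sys/kernel/debug/dri/0";

static int xfer32(const char *name, uint32_t *val, int do_write)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/%s", dbgdir, name);
	fd = open(path, do_write ? O_WRONLY : O_RDONLY);
	if (fd < 0)
		return -1;
	n = do_write ? pwrite(fd, val, 4, 0) : pread(fd, val, 4, 0);
	close(fd);
	return n == 4 ? 0 : -1;
}

int main(void)
{
	uint32_t v;

	v = 1;					/* non-zero: start residency logging */
	xfer32("amdgpu_gfxoff_residency", &v, 1);
	sleep(1);				/* let some GFXOFF cycles accumulate */
	v = 0;					/* zero: stop logging */
	xfer32("amdgpu_gfxoff_residency", &v, 1);

	if (!xfer32("amdgpu_gfxoff_residency", &v, 0))
		printf("last GFXOFF residency: %u\n", v);
	if (!xfer32("amdgpu_gfxoff_status", &v, 0))
		printf("current GFXOFF status: %u\n", v);
	return 0;
}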
