author     Daniel Vetter <daniel.vetter@ffwll.ch>    2020-11-27 17:41:21 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2021-01-12 14:26:30 +0100
commit     96667f8a4382db9ed042332ca6ee165ae9b91307
tree       bf161a6d1dedaeeddc224ddf63ad33a68eb5de20
parent     eb83b8e3e6478845f8130622a2048ed4ec3b3be3
mm: Close race in generic_access_phys
Way back it was a reasonable assumption that iomem mappings never change
the pfn range they point at. But this has changed:

- gpu drivers dynamically manage their memory nowadays, invalidating ptes
  with unmap_mapping_range when buffers get moved

- contiguous dma allocations have moved from dedicated carveouts to cma
  regions. This means if we miss the unmap the pfn might contain pagecache
  or anon memory (well anything allocated with __GFP_MOVABLE)

- even /dev/mem now invalidates mappings when the kernel requests that
  iomem region when CONFIG_IO_STRICT_DEVMEM is set, see 3234ac664a87
  ("/dev/mem: Revoke mappings when a driver claims the region")

Accessing pfns obtained from ptes without holding all the locks is
therefore no longer a good idea. Fix this.

Since ioremap might need to manipulate pagetables too we need to drop the
pt lock and have a retry loop if we raced.

While at it, also add kerneldoc and improve the comment for the
vm_ops->access function. It's for accessing, not for moving the memory
from iomem to system memory, as the old comment seemed to suggest.

References: 28b2ee20c7cb ("access_process_vm device memory infrastructure")
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: linux-mm@kvack.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-samsung-soc@vger.kernel.org
Cc: linux-media@vger.kernel.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20201127164131.2244124-8-daniel.vetter@ffwll.ch
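The path this patch hardens is the one debuggers use: when ptrace or
/proc/<pid>/mem touches a VM_IO | VM_PFNMAP mapping, get_user_pages() fails
and access_process_vm() falls back to vma->vm_ops->access(), which for iomem
mappings is generic_access_phys(). A minimal userspace sketch of how that
path gets exercised (the pid and address are placeholders, and reading
another task's memory needs ptrace permission):

/*
 * Userspace sketch (not part of this patch): read another task's mmap()ed
 * iomem region through /proc/<pid>/mem.  The pread() below ends up in
 * access_process_vm(), and for a VM_IO | VM_PFNMAP vma it is served by
 * vma->vm_ops->access(), i.e. generic_access_phys().
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        pid_t pid;
        unsigned long addr;
        char path[64], buf[64];
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
                return 1;
        }
        pid = (pid_t)strtol(argv[1], NULL, 0);
        addr = strtoul(argv[2], NULL, 16);

        /* Attach and wait for the stop so the kernel lets us read its memory. */
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                perror("ptrace");
                return 1;
        }
        waitpid(pid, NULL, 0);

        snprintf(path, sizeof(path), "/proc/%d/mem", pid);
        fd = open(path, O_RDONLY);
        if (fd >= 0) {
                ssize_t n = pread(fd, buf, sizeof(buf), (off_t)addr);

                printf("read %zd bytes at 0x%lx\n", n, addr);
                close(fd);
        }

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
}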
-rw-r--r--   include/linux/mm.h   3
-rw-r--r--   mm/memory.c          46
2 files changed, 45 insertions, 4 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 049439b65553..c9900aedc195 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -590,7 +590,8 @@ struct vm_operations_struct {
 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
 
 	/* called by access_process_vm when get_user_pages() fails, typically
-	 * for use by special VMAs that can switch between memory and hardware
+	 * for use by special VMAs. See also generic_access_phys() for a generic
+	 * implementation useful for any iomem mapping.
 	 */
 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 		      void *buf, int len, int write);
diff --git a/mm/memory.c b/mm/memory.c
index feff48e1465a..8079c181efff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4834,28 +4834,68 @@ out:
 	return ret;
 }
 
+/**
+ * generic_access_phys - generic implementation for iomem mmap access
+ * @vma: the vma to access
+ * @addr: userspace address, not relative offset within @vma
+ * @buf: buffer to read/write
+ * @len: length of transfer
+ * @write: set to FOLL_WRITE when writing, otherwise reading
+ *
+ * This is a generic implementation for &vm_operations_struct.access for an
+ * iomem mapping. This callback is used by access_process_vm() when the @vma is
+ * not page based.
+ */
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write)
 {
 	resource_size_t phys_addr;
 	unsigned long prot = 0;
 	void __iomem *maddr;
-	int offset = addr & (PAGE_SIZE-1);
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+	int offset = offset_in_page(addr);
+	int ret = -EINVAL;
+
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+retry:
+	if (follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl))
+		return -EINVAL;
+	pte = *ptep;
+	pte_unmap_unlock(ptep, ptl);
 
-	if (follow_phys(vma, addr, write, &prot, &phys_addr))
+	prot = pgprot_val(pte_pgprot(pte));
+	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+
+	if ((write & FOLL_WRITE) && !pte_write(pte))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
 	if (!maddr)
 		return -ENOMEM;
 
+	if (follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl))
+		goto out_unmap;
+
+	if (!pte_same(pte, *ptep)) {
+		pte_unmap_unlock(ptep, ptl);
+		iounmap(maddr);
+
+		goto retry;
+	}
+
 	if (write)
 		memcpy_toio(maddr + offset, buf, len);
 	else
 		memcpy_fromio(buf, maddr + offset, len);
+	ret = len;
+	pte_unmap_unlock(ptep, ptl);
+out_unmap:
 	iounmap(maddr);
 
-	return len;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(generic_access_phys);
 #endif
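
As the new kerneldoc notes, generic_access_phys() is meant to be plugged
directly into a driver's vm_operations_struct (it is available when
CONFIG_HAVE_IOREMAP_PROT is set). A minimal sketch of such driver glue, with
hypothetical mydrv_* names and no vm_pgoff handling:

/*
 * Hypothetical driver glue (not part of this patch): expose a device BAR to
 * userspace and reuse generic_access_phys() so ptrace / /proc/<pid>/mem can
 * reach the mapping through the race-free path above.
 */
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

struct mydrv_device {
        resource_size_t bar_phys;       /* physical base of the mapped BAR */
        resource_size_t bar_len;        /* length of the BAR */
};

static const struct vm_operations_struct mydrv_vm_ops = {
        .access = generic_access_phys,  /* called when get_user_pages() fails */
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv_device *mydev = file->private_data;
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > mydev->bar_len)
                return -EINVAL;

        vma->vm_ops = &mydrv_vm_ops;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* remap_pfn_range() sets VM_IO | VM_PFNMAP on the vma. */
        return remap_pfn_range(vma, vma->vm_start,
                               mydev->bar_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

remap_pfn_range() marks the vma VM_IO | VM_PFNMAP, which is precisely the
precondition the new code checks before walking the page tables, and a later
unmap_mapping_range() on the mapping is now handled by the retry loop instead
of being raced against.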