Diffstat (limited to 'drivers/dax/device.c')
-rw-r--r--  drivers/dax/device.c | 105
1 file changed, 54 insertions(+), 51 deletions(-)
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 5494d745ced5..22999a402e02 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -4,7 +4,6 @@
 #include <linux/pagemap.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
@@ -14,8 +13,9 @@
 #include "dax-private.h"
 #include "bus.h"
 
-static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
-		const char *func)
+static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+		unsigned long start, unsigned long end, struct file *file,
+		const char *func)
 {
 	struct device *dev = &dev_dax->dev;
 	unsigned long mask;
@@ -24,7 +24,7 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	/* prevent private mappings from being established */
-	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+	if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
 		dev_info_ratelimited(dev,
 				"%s: %s: fail, attempted private mapping\n",
 				current->comm, func);
@@ -32,15 +32,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 	}
 
 	mask = dev_dax->align - 1;
-	if (vma->vm_start & mask || vma->vm_end & mask) {
+	if (start & mask || end & mask) {
 		dev_info_ratelimited(dev,
 				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
-				current->comm, func, vma->vm_start, vma->vm_end,
+				current->comm, func, start, end,
 				mask);
 		return -EINVAL;
 	}
 
-	if (!vma_is_dax(vma)) {
+	if (!file_is_dax(file)) {
 		dev_info_ratelimited(dev,
 				"%s: %s: fail, vma is not DAX capable\n",
 				current->comm, func);
@@ -50,6 +50,13 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 	return 0;
 }
 
+static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+		const char *func)
+{
+	return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+			   vma->vm_file, func);
+}
+
 /* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
 __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 		unsigned long size)
@@ -73,7 +80,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 	return -1;
 }
 
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
 			    unsigned long fault_size)
 {
 	unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -86,17 +93,16 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
 		nr_pages = 1;
 
 	pgoff = linear_page_index(vmf->vma,
-			ALIGN(vmf->address, fault_size));
+			ALIGN_DOWN(vmf->address, fault_size));
 
 	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+		struct folio *folio = pfn_folio(pfn + i);
 
-		page = compound_head(page);
-		if (page->mapping)
+		if (folio->mapping)
 			continue;
 
-		page->mapping = filp->f_mapping;
-		page->index = pgoff + i;
+		folio->mapping = filp->f_mapping;
+		folio->index = pgoff + i;
 	}
 }
 
@@ -105,7 +111,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 {
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -126,11 +132,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+	return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
+				       vmf->flags & FAULT_FLAG_WRITE);
 }
 
 static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
@@ -140,7 +147,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +176,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
+				    vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -184,7 +192,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PUD_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -214,11 +222,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
+				    vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
@@ -228,32 +237,26 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 }
 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
-		enum page_entry_size pe_size)
+static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
 {
 	struct file *filp = vmf->vma->vm_file;
 	vm_fault_t rc = VM_FAULT_SIGBUS;
 	int id;
 	struct dev_dax *dev_dax = filp->private_data;
 
-	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
-			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
-			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
+	dev_dbg(&dev_dax->dev, "%s: op=%s addr=%#lx order=%d\n", current->comm,
+		(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
+		vmf->address & ~((1UL << (order + PAGE_SHIFT)) - 1), order);
 
 	id = dax_read_lock();
-	switch (pe_size) {
-	case PE_SIZE_PTE:
+	if (order == 0)
 		rc = __dev_dax_pte_fault(dev_dax, vmf);
-		break;
-	case PE_SIZE_PMD:
+	else if (order == PMD_ORDER)
 		rc = __dev_dax_pmd_fault(dev_dax, vmf);
-		break;
-	case PE_SIZE_PUD:
+	else if (order == PUD_ORDER)
 		rc = __dev_dax_pud_fault(dev_dax, vmf);
-		break;
-	default:
+	else
 		rc = VM_FAULT_SIGBUS;
-	}
 
 	dax_read_unlock(id);
 
@@ -262,7 +265,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 
 static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
 {
-	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
+	return dev_dax_huge_fault(vmf, 0);
 }
 
 static int dev_dax_may_split(struct vm_area_struct *vma, unsigned long addr)
@@ -290,8 +293,9 @@ static const struct vm_operations_struct dax_vm_ops = {
 	.pagesize = dev_dax_pagesize,
 };
 
-static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap_prepare(struct vm_area_desc *desc)
 {
+	struct file *filp = desc->file;
 	struct dev_dax *dev_dax = filp->private_data;
 	int rc, id;
 
@@ -302,13 +306,14 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 	 * We lock to check dax_dev liveness and will re-check at
 	 * fault time.
	 */
 	id = dax_read_lock();
-	rc = check_vma(dev_dax, vma, __func__);
+	rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+			 __func__);
 	dax_read_unlock(id);
 	if (rc)
 		return rc;
 
-	vma->vm_ops = &dax_vm_ops;
-	vma->vm_flags |= VM_HUGEPAGE;
+	desc->vm_ops = &dax_vm_ops;
+	desc->vm_flags |= VM_HUGEPAGE;
 	return 0;
 }
 
@@ -335,14 +340,13 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
 	if ((off + len_align) < off)
 		goto out;
 
-	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
-			pgoff, flags);
+	addr_align = mm_get_unmapped_area(filp, addr, len_align, pgoff, flags);
 	if (!IS_ERR_VALUE(addr_align)) {
 		addr_align += (off - addr_align) & (align - 1);
 		return addr_align;
 	}
  out:
-	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+	return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 
 static const struct address_space_operations dev_dax_aops = {
@@ -382,8 +386,8 @@ static const struct file_operations dax_fops = {
 	.open = dax_open,
 	.release = dax_release,
 	.get_unmapped_area = dax_get_unmapped_area,
-	.mmap = dax_mmap,
-	.mmap_supported_flags = MAP_SYNC,
+	.mmap_prepare = dax_mmap_prepare,
+	.fop_flags = FOP_MMAP_SYNC,
 };
 
 static void dev_dax_cdev_del(void *cdev)
@@ -396,7 +400,7 @@ static void dev_dax_kill(void *dev_dax)
 	kill_dev_dax(dev_dax);
 }
 
-int dev_dax_probe(struct dev_dax *dev_dax)
+static int dev_dax_probe(struct dev_dax *dev_dax)
 {
 	struct dax_device *dax_dev = dev_dax->dax_dev;
 	struct device *dev = &dev_dax->dev;
@@ -471,12 +475,10 @@ int dev_dax_probe(struct dev_dax *dev_dax)
 	run_dax(dax_dev);
 	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
 }
-EXPORT_SYMBOL_GPL(dev_dax_probe);
 
 static struct dax_device_driver device_dax_driver = {
 	.probe = dev_dax_probe,
-	/* all probe actions are unwound by devm, so .remove isn't necessary */
-	.match_always = 1,
+	.type = DAXDRV_DEVICE_TYPE,
 };
 
 static int __init dax_init(void)
@@ -490,6 +492,7 @@ static void __exit dax_exit(void)
 }
 
 MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Device DAX: direct access device driver");
MODULE_LICENSE("GPL v2");
 module_init(dax_init);
 module_exit(dax_exit);
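
For context, and not taken from the diff above: the .mmap to .mmap_prepare conversion that the dax_mmap_prepare() hunks illustrate follows a general pattern, in which the hook receives a struct vm_area_desc before any vm_area_struct exists. The sketch below assumes only the vm_area_desc fields visible above (file, start, end, vm_flags, vm_ops); my_fault, my_vm_ops, my_mmap_prepare and my_fops are hypothetical placeholder names, not symbols from this commit.

/*
 * Hypothetical sketch of an .mmap_prepare handler; it mirrors the shape of
 * dax_mmap_prepare() above but is not part of this change.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;			/* placeholder fault handler */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,
};

static int my_mmap_prepare(struct vm_area_desc *desc)
{
	/* No vm_area_struct exists yet; validate the descriptor instead. */
	if ((desc->vm_flags & VM_MAYSHARE) != VM_MAYSHARE)
		return -EINVAL;			/* reject private mappings, as __check_vma() does */

	desc->vm_ops = &my_vm_ops;		/* ops and flags are set on the descriptor */
	desc->vm_flags |= VM_HUGEPAGE;
	return 0;
}

static const struct file_operations my_fops = {
	.mmap_prepare = my_mmap_prepare,	/* replaces .mmap */
	.fop_flags = FOP_MMAP_SYNC,		/* replaces .mmap_supported_flags = MAP_SYNC */
};

Because the prepare hook only sees this descriptor rather than a live vma, the driver's validation has to work on raw flags and a start/end range, which is what motivates splitting check_vma() into the __check_vma() helper above.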
