Diffstat (limited to 'mm/hmm.c')
| -rw-r--r-- | mm/hmm.c | 1655 |
1 file changed, 655 insertions(+), 1000 deletions(-)
@@ -1,24 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013 Red Hat Inc. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Authors: Jérôme Glisse <jglisse@redhat.com> */ /* * Refer to include/linux/hmm.h for information about heterogeneous memory * management or HMM for short. */ -#include <linux/mm.h> +#include <linux/pagewalk.h> #include <linux/hmm.h> +#include <linux/hmm-dma.h> #include <linux/init.h> #include <linux/rmap.h> #include <linux/swap.h> @@ -26,559 +18,381 @@ #include <linux/sched.h> #include <linux/mmzone.h> #include <linux/pagemap.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <linux/hugetlb.h> #include <linux/memremap.h> +#include <linux/sched/mm.h> #include <linux/jump_label.h> +#include <linux/dma-mapping.h> +#include <linux/pci-p2pdma.h> #include <linux/mmu_notifier.h> #include <linux/memory_hotplug.h> -#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT) - -#if IS_ENABLED(CONFIG_HMM_MIRROR) -static const struct mmu_notifier_ops hmm_mmu_notifier_ops; - -/* - * struct hmm - HMM per mm struct - * - * @mm: mm struct this HMM struct is bound to - * @lock: lock protecting ranges list - * @ranges: list of range being snapshotted - * @mirrors: list of mirrors for this mm - * @mmu_notifier: mmu notifier to track updates to CPU page table - * @mirrors_sem: read/write semaphore protecting the mirrors list - */ -struct hmm { - struct mm_struct *mm; - spinlock_t lock; - struct list_head ranges; - struct list_head mirrors; - struct mmu_notifier mmu_notifier; - struct rw_semaphore mirrors_sem; -}; - -/* - * hmm_register - register HMM against an mm (HMM internal) - * - * @mm: mm struct to attach to - * - * This is not intended to be used directly by device drivers. It allocates an - * HMM struct if mm does not have one, and initializes it. - */ -static struct hmm *hmm_register(struct mm_struct *mm) -{ - struct hmm *hmm = READ_ONCE(mm->hmm); - bool cleanup = false; - - /* - * The hmm struct can only be freed once the mm_struct goes away, - * hence we should always have pre-allocated an new hmm struct - * above. 
- */ - if (hmm) - return hmm; - - hmm = kmalloc(sizeof(*hmm), GFP_KERNEL); - if (!hmm) - return NULL; - INIT_LIST_HEAD(&hmm->mirrors); - init_rwsem(&hmm->mirrors_sem); - hmm->mmu_notifier.ops = NULL; - INIT_LIST_HEAD(&hmm->ranges); - spin_lock_init(&hmm->lock); - hmm->mm = mm; - - spin_lock(&mm->page_table_lock); - if (!mm->hmm) - mm->hmm = hmm; - else - cleanup = true; - spin_unlock(&mm->page_table_lock); - - if (cleanup) - goto error; - - /* - * We should only get here if hold the mmap_sem in write mode ie on - * registration of first mirror through hmm_mirror_register() - */ - hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops; - if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) - goto error_mm; - - return mm->hmm; - -error_mm: - spin_lock(&mm->page_table_lock); - if (mm->hmm == hmm) - mm->hmm = NULL; - spin_unlock(&mm->page_table_lock); -error: - kfree(hmm); - return NULL; -} - -void hmm_mm_destroy(struct mm_struct *mm) -{ - kfree(mm->hmm); -} - -static int hmm_invalidate_range(struct hmm *hmm, bool device, - const struct hmm_update *update) -{ - struct hmm_mirror *mirror; - struct hmm_range *range; - - spin_lock(&hmm->lock); - list_for_each_entry(range, &hmm->ranges, list) { - unsigned long addr, idx, npages; - - if (update->end < range->start || update->start >= range->end) - continue; - - range->valid = false; - addr = max(update->start, range->start); - idx = (addr - range->start) >> PAGE_SHIFT; - npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT; - memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages); - } - spin_unlock(&hmm->lock); - - if (!device) - return 0; - - down_read(&hmm->mirrors_sem); - list_for_each_entry(mirror, &hmm->mirrors, list) { - int ret; - - ret = mirror->ops->sync_cpu_device_pagetables(mirror, update); - if (!update->blockable && ret == -EAGAIN) { - up_read(&hmm->mirrors_sem); - return -EAGAIN; - } - } - up_read(&hmm->mirrors_sem); - - return 0; -} - -static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm) -{ - struct hmm_mirror *mirror; - struct hmm *hmm = mm->hmm; - - down_write(&hmm->mirrors_sem); - mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror, - list); - while (mirror) { - list_del_init(&mirror->list); - if (mirror->ops->release) { - /* - * Drop mirrors_sem so callback can wait on any pending - * work that might itself trigger mmu_notifier callback - * and thus would deadlock with us. 
- */ - up_write(&hmm->mirrors_sem); - mirror->ops->release(mirror); - down_write(&hmm->mirrors_sem); - } - mirror = list_first_entry_or_null(&hmm->mirrors, - struct hmm_mirror, list); - } - up_write(&hmm->mirrors_sem); -} - -static int hmm_invalidate_range_start(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct hmm_update update; - struct hmm *hmm = range->mm->hmm; - - VM_BUG_ON(!hmm); - - update.start = range->start; - update.end = range->end; - update.event = HMM_UPDATE_INVALIDATE; - update.blockable = range->blockable; - return hmm_invalidate_range(hmm, true, &update); -} - -static void hmm_invalidate_range_end(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct hmm_update update; - struct hmm *hmm = range->mm->hmm; - - VM_BUG_ON(!hmm); - - update.start = range->start; - update.end = range->end; - update.event = HMM_UPDATE_INVALIDATE; - update.blockable = true; - hmm_invalidate_range(hmm, false, &update); -} - -static const struct mmu_notifier_ops hmm_mmu_notifier_ops = { - .release = hmm_release, - .invalidate_range_start = hmm_invalidate_range_start, - .invalidate_range_end = hmm_invalidate_range_end, -}; - -/* - * hmm_mirror_register() - register a mirror against an mm - * - * @mirror: new mirror struct to register - * @mm: mm to register against - * - * To start mirroring a process address space, the device driver must register - * an HMM mirror struct. - * - * THE mm->mmap_sem MUST BE HELD IN WRITE MODE ! - */ -int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) -{ - /* Sanity check */ - if (!mm || !mirror || !mirror->ops) - return -EINVAL; - -again: - mirror->hmm = hmm_register(mm); - if (!mirror->hmm) - return -ENOMEM; - - down_write(&mirror->hmm->mirrors_sem); - if (mirror->hmm->mm == NULL) { - /* - * A racing hmm_mirror_unregister() is about to destroy the hmm - * struct. Try again to allocate a new one. - */ - up_write(&mirror->hmm->mirrors_sem); - mirror->hmm = NULL; - goto again; - } else { - list_add(&mirror->list, &mirror->hmm->mirrors); - up_write(&mirror->hmm->mirrors_sem); - } - - return 0; -} -EXPORT_SYMBOL(hmm_mirror_register); - -/* - * hmm_mirror_unregister() - unregister a mirror - * - * @mirror: new mirror struct to register - * - * Stop mirroring a process address space, and cleanup. - */ -void hmm_mirror_unregister(struct hmm_mirror *mirror) -{ - bool should_unregister = false; - struct mm_struct *mm; - struct hmm *hmm; - - if (mirror->hmm == NULL) - return; - - hmm = mirror->hmm; - down_write(&hmm->mirrors_sem); - list_del_init(&mirror->list); - should_unregister = list_empty(&hmm->mirrors); - mirror->hmm = NULL; - mm = hmm->mm; - hmm->mm = NULL; - up_write(&hmm->mirrors_sem); - - if (!should_unregister || mm == NULL) - return; - - mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm); - - spin_lock(&mm->page_table_lock); - if (mm->hmm == hmm) - mm->hmm = NULL; - spin_unlock(&mm->page_table_lock); - - kfree(hmm); -} -EXPORT_SYMBOL(hmm_mirror_unregister); +#include "internal.h" struct hmm_vma_walk { struct hmm_range *range; unsigned long last; - bool fault; - bool block; }; -static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr, - bool write_fault, uint64_t *pfn) -{ - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE; - struct hmm_vma_walk *hmm_vma_walk = walk->private; - struct hmm_range *range = hmm_vma_walk->range; - struct vm_area_struct *vma = walk->vma; - vm_fault_t ret; - - flags |= hmm_vma_walk->block ? 
0 : FAULT_FLAG_ALLOW_RETRY; - flags |= write_fault ? FAULT_FLAG_WRITE : 0; - ret = handle_mm_fault(vma, addr, flags); - if (ret & VM_FAULT_RETRY) - return -EBUSY; - if (ret & VM_FAULT_ERROR) { - *pfn = range->values[HMM_PFN_ERROR]; - return -EFAULT; - } +enum { + HMM_NEED_FAULT = 1 << 0, + HMM_NEED_WRITE_FAULT = 1 << 1, + HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT, +}; - return -EAGAIN; -} +enum { + /* These flags are carried from input-to-output */ + HMM_PFN_INOUT_FLAGS = HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | + HMM_PFN_P2PDMA_BUS, +}; -static int hmm_pfns_bad(unsigned long addr, - unsigned long end, - struct mm_walk *walk) +static int hmm_pfns_fill(unsigned long addr, unsigned long end, + struct hmm_range *range, unsigned long cpu_flags) { - struct hmm_vma_walk *hmm_vma_walk = walk->private; - struct hmm_range *range = hmm_vma_walk->range; - uint64_t *pfns = range->pfns; - unsigned long i; - - i = (addr - range->start) >> PAGE_SHIFT; - for (; addr < end; addr += PAGE_SIZE, i++) - pfns[i] = range->values[HMM_PFN_ERROR]; + unsigned long i = (addr - range->start) >> PAGE_SHIFT; + for (; addr < end; addr += PAGE_SIZE, i++) { + range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS; + range->hmm_pfns[i] |= cpu_flags; + } return 0; } /* - * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s) - * @start: range virtual start address (inclusive) + * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s) + * @addr: range virtual start address (inclusive) * @end: range virtual end address (exclusive) - * @fault: should we fault or not ? - * @write_fault: write fault ? + * @required_fault: HMM_NEED_* flags * @walk: mm_walk structure - * Returns: 0 on success, -EAGAIN after page fault, or page fault error + * Return: -EBUSY after page fault, or page fault error * * This function will be called whenever pmd_none() or pte_none() returns true, * or whenever there is no page directory covering the virtual address range. */ -static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end, - bool fault, bool write_fault, - struct mm_walk *walk) +static int hmm_vma_fault(unsigned long addr, unsigned long end, + unsigned int required_fault, struct mm_walk *walk) { struct hmm_vma_walk *hmm_vma_walk = walk->private; - struct hmm_range *range = hmm_vma_walk->range; - uint64_t *pfns = range->pfns; - unsigned long i; + struct vm_area_struct *vma = walk->vma; + unsigned int fault_flags = FAULT_FLAG_REMOTE; + WARN_ON_ONCE(!required_fault); hmm_vma_walk->last = addr; - i = (addr - range->start) >> PAGE_SHIFT; - for (; addr < end; addr += PAGE_SIZE, i++) { - pfns[i] = range->values[HMM_PFN_NONE]; - if (fault || write_fault) { - int ret; - - ret = hmm_vma_do_fault(walk, addr, write_fault, - &pfns[i]); - if (ret != -EAGAIN) - return ret; - } + + if (required_fault & HMM_NEED_WRITE_FAULT) { + if (!(vma->vm_flags & VM_WRITE)) + return -EPERM; + fault_flags |= FAULT_FLAG_WRITE; } - return (fault || write_fault) ? 
-EAGAIN : 0; + for (; addr < end; addr += PAGE_SIZE) + if (handle_mm_fault(vma, addr, fault_flags, NULL) & + VM_FAULT_ERROR) + return -EFAULT; + return -EBUSY; } -static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk, - uint64_t pfns, uint64_t cpu_flags, - bool *fault, bool *write_fault) +static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk, + unsigned long pfn_req_flags, + unsigned long cpu_flags) { struct hmm_range *range = hmm_vma_walk->range; - *fault = *write_fault = false; - if (!hmm_vma_walk->fault) - return; + /* + * We not only consider the individual per-page request, we also + * consider the default flags requested for the range. The API can + * be used two ways: one where the HMM user coalesces + * multiple page faults into one request and sets flags per pfn for + * those faults, and one where the HMM user wants to pre-fault + * a range with specific flags. For the latter it is a + * waste to have the user pre-fill the pfn arrays with a default + * flags value. + */ + pfn_req_flags &= range->pfn_flags_mask; + pfn_req_flags |= range->default_flags; /* We aren't asked to do anything ... */ - if (!(pfns & range->flags[HMM_PFN_VALID])) - return; - /* If this is device memory than only fault if explicitly requested */ - if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) { - /* Do we fault on device memory ? */ - if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { - *write_fault = pfns & range->flags[HMM_PFN_WRITE]; - *fault = true; - } - return; - } + if (!(pfn_req_flags & HMM_PFN_REQ_FAULT)) + return 0; - /* If CPU page table is not valid then we need to fault */ - *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]); /* Need to write fault ? */ - if ((pfns & range->flags[HMM_PFN_WRITE]) && - !(cpu_flags & range->flags[HMM_PFN_WRITE])) { - *write_fault = true; - *fault = true; - } + if ((pfn_req_flags & HMM_PFN_REQ_WRITE) && + !(cpu_flags & HMM_PFN_WRITE)) + return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT; + + /* If CPU page table is not valid then we need to fault */ + if (!(cpu_flags & HMM_PFN_VALID)) + return HMM_NEED_FAULT; + return 0; } -static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk, - const uint64_t *pfns, unsigned long npages, - uint64_t cpu_flags, bool *fault, - bool *write_fault) +static unsigned int +hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk, + const unsigned long hmm_pfns[], unsigned long npages, + unsigned long cpu_flags) { + struct hmm_range *range = hmm_vma_walk->range; + unsigned int required_fault = 0; unsigned long i; - if (!hmm_vma_walk->fault) { - *fault = *write_fault = false; - return; - } + /* + * If the default flags do not request to fault pages, and the mask does + * not allow for individual pages to be faulted, then + * hmm_pte_need_fault() will always return 0. 
+ */ + if (!((range->default_flags | range->pfn_flags_mask) & + HMM_PFN_REQ_FAULT)) + return 0; for (i = 0; i < npages; ++i) { - hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags, - fault, write_fault); - if ((*fault) || (*write_fault)) - return; + required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i], + cpu_flags); + if (required_fault == HMM_NEED_ALL_BITS) + return required_fault; } + return required_fault; } static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, - struct mm_walk *walk) + __always_unused int depth, struct mm_walk *walk) { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; - bool fault, write_fault; + unsigned int required_fault; unsigned long i, npages; - uint64_t *pfns; + unsigned long *hmm_pfns; i = (addr - range->start) >> PAGE_SHIFT; npages = (end - addr) >> PAGE_SHIFT; - pfns = &range->pfns[i]; - hmm_range_need_fault(hmm_vma_walk, pfns, npages, - 0, &fault, &write_fault); - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + hmm_pfns = &range->hmm_pfns[i]; + required_fault = + hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0); + if (!walk->vma) { + if (required_fault) + return -EFAULT; + return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR); + } + if (required_fault) + return hmm_vma_fault(addr, end, required_fault, walk); + return hmm_pfns_fill(addr, end, range, 0); } -static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) +static inline unsigned long hmm_pfn_flags_order(unsigned long order) +{ + return order << HMM_PFN_ORDER_SHIFT; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, + pmd_t pmd) { if (pmd_protnone(pmd)) return 0; - return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | - range->flags[HMM_PFN_WRITE] : - range->flags[HMM_PFN_VALID]; + return (pmd_write(pmd) ? 
(HMM_PFN_VALID | HMM_PFN_WRITE) : + HMM_PFN_VALID) | + hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT); } -static int hmm_vma_handle_pmd(struct mm_walk *walk, - unsigned long addr, - unsigned long end, - uint64_t *pfns, +static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, + unsigned long end, unsigned long hmm_pfns[], pmd_t pmd) { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; unsigned long pfn, npages, i; - bool fault, write_fault; - uint64_t cpu_flags; + unsigned int required_fault; + unsigned long cpu_flags; npages = (end - addr) >> PAGE_SHIFT; cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); - hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags, - &fault, &write_fault); - - if (pmd_protnone(pmd) || fault || write_fault) - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); - - pfn = pmd_pfn(pmd) + pte_index(addr); - for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) - pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; - hmm_vma_walk->last = end; + required_fault = + hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags); + if (required_fault) + return hmm_vma_fault(addr, end, required_fault, walk); + + pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) { + hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS; + hmm_pfns[i] |= pfn | cpu_flags; + } return 0; } +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +/* stub to allow the code below to compile */ +int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, + unsigned long end, unsigned long hmm_pfns[], pmd_t pmd); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) +static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, + pte_t pte) { - if (pte_none(pte) || !pte_present(pte)) + if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte)) return 0; - return pte_write(pte) ? range->flags[HMM_PFN_VALID] | - range->flags[HMM_PFN_WRITE] : - range->flags[HMM_PFN_VALID]; + return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID; } static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, unsigned long end, pmd_t *pmdp, pte_t *ptep, - uint64_t *pfn) + unsigned long *hmm_pfn) { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; - struct vm_area_struct *vma = walk->vma; - bool fault, write_fault; - uint64_t cpu_flags; - pte_t pte = *ptep; - uint64_t orig_pfn = *pfn; + unsigned int required_fault; + unsigned long cpu_flags; + pte_t pte = ptep_get(ptep); + uint64_t pfn_req_flags = *hmm_pfn; + uint64_t new_pfn_flags = 0; - *pfn = range->values[HMM_PFN_NONE]; - cpu_flags = pte_to_hmm_pfn_flags(range, pte); - hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, - &fault, &write_fault); - - if (pte_none(pte)) { - if (fault || write_fault) + /* + * Any other marker than a UFFD WP marker will result in a fault error + * that will be correctly handled, so we need only check for UFFD WP + * here. 
+ */ + if (pte_none(pte) || pte_is_uffd_wp_marker(pte)) { + required_fault = + hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0); + if (required_fault) goto fault; - return 0; + goto out; } if (!pte_present(pte)) { - swp_entry_t entry = pte_to_swp_entry(pte); - - if (!non_swap_entry(entry)) { - if (fault || write_fault) - goto fault; - return 0; - } + const softleaf_t entry = softleaf_from_pte(pte); /* - * This is a special swap entry, ignore migration, use - * device and report anything else as error. + * Don't fault in device private pages owned by the caller, + * just report the PFN. */ - if (is_device_private_entry(entry)) { - cpu_flags = range->flags[HMM_PFN_VALID] | - range->flags[HMM_PFN_DEVICE_PRIVATE]; - cpu_flags |= is_write_device_private_entry(entry) ? - range->flags[HMM_PFN_WRITE] : 0; - hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, - &fault, &write_fault); - if (fault || write_fault) - goto fault; - *pfn = hmm_pfn_from_pfn(range, swp_offset(entry)); - *pfn |= cpu_flags; - return 0; + if (softleaf_is_device_private(entry) && + page_pgmap(softleaf_to_page(entry))->owner == + range->dev_private_owner) { + cpu_flags = HMM_PFN_VALID; + if (softleaf_is_device_private_write(entry)) + cpu_flags |= HMM_PFN_WRITE; + new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags; + goto out; } - if (is_migration_entry(entry)) { - if (fault || write_fault) { - pte_unmap(ptep); - hmm_vma_walk->last = addr; - migration_entry_wait(vma->vm_mm, - pmdp, addr); - return -EAGAIN; - } - return 0; + required_fault = + hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0); + if (!required_fault) + goto out; + + if (softleaf_is_swap(entry)) + goto fault; + + if (softleaf_is_device_private(entry)) + goto fault; + + if (softleaf_is_device_exclusive(entry)) + goto fault; + + if (softleaf_is_migration(entry)) { + pte_unmap(ptep); + hmm_vma_walk->last = addr; + migration_entry_wait(walk->mm, pmdp, addr); + return -EBUSY; } /* Report error for everything else */ - *pfn = range->values[HMM_PFN_ERROR]; + pte_unmap(ptep); return -EFAULT; } - if (fault || write_fault) + cpu_flags = pte_to_hmm_pfn_flags(range, pte); + required_fault = + hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags); + if (required_fault) goto fault; - *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags; + /* + * Since each architecture defines a struct page for the zero page, just + * fall through and treat it like a normal page. 
+ */ + if (!vm_normal_page(walk->vma, addr, pte) && + !is_zero_pfn(pte_pfn(pte))) { + if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) { + pte_unmap(ptep); + return -EFAULT; + } + new_pfn_flags = HMM_PFN_ERROR; + goto out; + } + + new_pfn_flags = pte_pfn(pte) | cpu_flags; +out: + *hmm_pfn = (*hmm_pfn & HMM_PFN_INOUT_FLAGS) | new_pfn_flags; return 0; fault: pte_unmap(ptep); /* Fault any virtual address we were asked to fault */ - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + return hmm_vma_fault(addr, end, required_fault, walk); } +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start, + unsigned long end, unsigned long *hmm_pfns, + pmd_t pmd) +{ + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + unsigned long npages = (end - start) >> PAGE_SHIFT; + const softleaf_t entry = softleaf_from_pmd(pmd); + unsigned long addr = start; + unsigned int required_fault; + + if (softleaf_is_device_private(entry) && + softleaf_to_folio(entry)->pgmap->owner == + range->dev_private_owner) { + unsigned long cpu_flags = HMM_PFN_VALID | + hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT); + unsigned long pfn = softleaf_to_pfn(entry); + unsigned long i; + + if (softleaf_is_device_private_write(entry)) + cpu_flags |= HMM_PFN_WRITE; + + /* + * Fully populate the PFN list though subsequent PFNs could be + * inferred, because drivers which are not yet aware of large + * folios probably do not support sparsely populated PFN lists. + */ + for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) { + hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS; + hmm_pfns[i] |= pfn | cpu_flags; + } + + return 0; + } + + required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns, + npages, 0); + if (required_fault) { + if (softleaf_is_device_private(entry)) + return hmm_vma_fault(addr, end, required_fault, walk); + else + return -EFAULT; + } + + return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); +} +#else +static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start, + unsigned long end, unsigned long *hmm_pfns, + pmd_t pmd) +{ + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + unsigned long npages = (end - start) >> PAGE_SHIFT; + + if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) + return -EFAULT; + return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); +} +#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ + static int hmm_vma_walk_pmd(pmd_t *pmdp, unsigned long start, unsigned long end, @@ -586,655 +400,496 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; - struct vm_area_struct *vma = walk->vma; - uint64_t *pfns = range->pfns; - unsigned long addr = start, i; + unsigned long *hmm_pfns = + &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT]; + unsigned long npages = (end - start) >> PAGE_SHIFT; + unsigned long addr = start; pte_t *ptep; pmd_t pmd; - again: - pmd = READ_ONCE(*pmdp); + pmd = pmdp_get_lockless(pmdp); if (pmd_none(pmd)) - return hmm_vma_walk_hole(start, end, walk); - - if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB)) - return hmm_pfns_bad(start, end, walk); - - if (thp_migration_supported() && is_pmd_migration_entry(pmd)) { - bool fault, write_fault; - unsigned long npages; - uint64_t *pfns; + return hmm_vma_walk_hole(start, end, -1, walk); - i = (addr - range->start) >> PAGE_SHIFT; - npages = (end - 
addr) >> PAGE_SHIFT; - pfns = &range->pfns[i]; - - hmm_range_need_fault(hmm_vma_walk, pfns, npages, - 0, &fault, &write_fault); - if (fault || write_fault) { + if (thp_migration_supported() && pmd_is_migration_entry(pmd)) { + if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) { hmm_vma_walk->last = addr; - pmd_migration_entry_wait(vma->vm_mm, pmdp); - return -EAGAIN; + pmd_migration_entry_wait(walk->mm, pmdp); + return -EBUSY; } - return 0; - } else if (!pmd_present(pmd)) - return hmm_pfns_bad(start, end, walk); + return hmm_pfns_fill(start, end, range, 0); + } - if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) { + if (!pmd_present(pmd)) + return hmm_vma_handle_absent_pmd(walk, start, end, hmm_pfns, + pmd); + + if (pmd_trans_huge(pmd)) { /* - * No need to take pmd_lock here, even if some other threads + * No need to take pmd_lock here, even if some other thread * is splitting the huge pmd we will get that event through * mmu_notifier callback. * - * So just read pmd value and check again its a transparent + * So just read pmd value and check again it's a transparent * huge or device mapping one and compute corresponding pfn * values. */ - pmd = pmd_read_atomic(pmdp); - barrier(); - if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd)) + pmd = pmdp_get_lockless(pmdp); + if (!pmd_trans_huge(pmd)) goto again; - i = (addr - range->start) >> PAGE_SHIFT; - return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd); + return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd); } /* - * We have handled all the valid case above ie either none, migration, + * We have handled all the valid cases above ie either none, migration, * huge or transparent huge. At this point either it is a valid pmd * entry pointing to pte directory or it is a bad pmd that will not * recover. */ - if (pmd_bad(pmd)) - return hmm_pfns_bad(start, end, walk); + if (pmd_bad(pmd)) { + if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) + return -EFAULT; + return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); + } ptep = pte_offset_map(pmdp, addr); - i = (addr - range->start) >> PAGE_SHIFT; - for (; addr < end; addr += PAGE_SIZE, ptep++, i++) { + if (!ptep) + goto again; + for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) { int r; - r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]); + r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns); if (r) { - /* hmm_vma_handle_pte() did unmap pte directory */ - hmm_vma_walk->last = addr; + /* hmm_vma_handle_pte() did pte_unmap() */ return r; } } pte_unmap(ptep - 1); - - hmm_vma_walk->last = addr; return 0; } -static void hmm_pfns_clear(struct hmm_range *range, - uint64_t *pfns, - unsigned long addr, - unsigned long end) +#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, + pud_t pud) { - for (; addr < end; addr += PAGE_SIZE, pfns++) - *pfns = range->values[HMM_PFN_NONE]; + if (!pud_present(pud)) + return 0; + return (pud_write(pud) ? 
(HMM_PFN_VALID | HMM_PFN_WRITE) : + HMM_PFN_VALID) | + hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT); } -static void hmm_pfns_special(struct hmm_range *range) +static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, + struct mm_walk *walk) { - unsigned long addr = range->start, i = 0; + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + unsigned long addr = start; + pud_t pud; + spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma); + + if (!ptl) + return 0; + + /* Normally we don't want to split the huge page */ + walk->action = ACTION_CONTINUE; + + pud = pudp_get(pudp); + if (!pud_present(pud)) { + spin_unlock(ptl); + return hmm_vma_walk_hole(start, end, -1, walk); + } + + if (pud_leaf(pud)) { + unsigned long i, npages, pfn; + unsigned int required_fault; + unsigned long *hmm_pfns; + unsigned long cpu_flags; + + i = (addr - range->start) >> PAGE_SHIFT; + npages = (end - addr) >> PAGE_SHIFT; + hmm_pfns = &range->hmm_pfns[i]; + + cpu_flags = pud_to_hmm_pfn_flags(range, pud); + required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns, + npages, cpu_flags); + if (required_fault) { + spin_unlock(ptl); + return hmm_vma_fault(addr, end, required_fault, walk); + } - for (; addr < range->end; addr += PAGE_SIZE, i++) - range->pfns[i] = range->values[HMM_PFN_SPECIAL]; + pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + for (i = 0; i < npages; ++i, ++pfn) { + hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS; + hmm_pfns[i] |= pfn | cpu_flags; + } + goto out_unlock; + } + + /* Ask for the PUD to be split */ + walk->action = ACTION_SUBTREE; + +out_unlock: + spin_unlock(ptl); + return 0; } +#else +#define hmm_vma_walk_pud NULL +#endif -/* - * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses - * @range: range being snapshotted - * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid - * vma permission, 0 success - * - * This snapshots the CPU page table for a range of virtual addresses. Snapshot - * validity is tracked by range struct. See hmm_vma_range_done() for further - * information. - * - * The range struct is initialized here. It tracks the CPU page table, but only - * if the function returns success (0), in which case the caller must then call - * hmm_vma_range_done() to stop CPU page table update tracking on this range. - * - * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS - * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED ! - */ -int hmm_vma_get_pfns(struct hmm_range *range) +#ifdef CONFIG_HUGETLB_PAGE +static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask, + unsigned long start, unsigned long end, + struct mm_walk *walk) { - struct vm_area_struct *vma = range->vma; - struct hmm_vma_walk hmm_vma_walk; - struct mm_walk mm_walk; - struct hmm *hmm; - - /* Sanity check, this really should not happen ! */ - if (range->start < vma->vm_start || range->start >= vma->vm_end) - return -EINVAL; - if (range->end < vma->vm_start || range->end > vma->vm_end) - return -EINVAL; - - hmm = hmm_register(vma->vm_mm); - if (!hmm) - return -ENOMEM; - /* Caller must have registered a mirror, via hmm_mirror_register() ! 
*/ - if (!hmm->mmu_notifier.ops) - return -EINVAL; - - /* FIXME support hugetlb fs */ - if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || - vma_is_dax(vma)) { - hmm_pfns_special(range); - return -EINVAL; - } + unsigned long addr = start, i, pfn; + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + struct vm_area_struct *vma = walk->vma; + unsigned int required_fault; + unsigned long pfn_req_flags; + unsigned long cpu_flags; + spinlock_t *ptl; + pte_t entry; + + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); + entry = huge_ptep_get(walk->mm, addr, pte); + + i = (start - range->start) >> PAGE_SHIFT; + pfn_req_flags = range->hmm_pfns[i]; + cpu_flags = pte_to_hmm_pfn_flags(range, entry) | + hmm_pfn_flags_order(huge_page_order(hstate_vma(vma))); + required_fault = + hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags); + if (required_fault) { + int ret; - if (!(vma->vm_flags & VM_READ)) { + spin_unlock(ptl); + hugetlb_vma_unlock_read(vma); /* - * If vma do not allow read access, then assume that it does - * not allow write access, either. Architecture that allow - * write without read access are not supported by HMM, because - * operations such has atomic access would not work. + * Avoid deadlock: drop the vma lock before calling + * hmm_vma_fault(), which will itself potentially take and + * drop the vma lock. This is also correct from a + * protection point of view, because there is no further + * use here of either pte or ptl after dropping the vma + * lock. */ - hmm_pfns_clear(range, range->pfns, range->start, range->end); - return -EPERM; + ret = hmm_vma_fault(addr, end, required_fault, walk); + hugetlb_vma_lock_read(vma); + return ret; + } + + pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); + for (; addr < end; addr += PAGE_SIZE, i++, pfn++) { + range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS; + range->hmm_pfns[i] |= pfn | cpu_flags; } - /* Initialize range to track CPU page table update */ - spin_lock(&hmm->lock); - range->valid = true; - list_add_rcu(&range->list, &hmm->ranges); - spin_unlock(&hmm->lock); - - hmm_vma_walk.fault = false; - hmm_vma_walk.range = range; - mm_walk.private = &hmm_vma_walk; - - mm_walk.vma = vma; - mm_walk.mm = vma->vm_mm; - mm_walk.pte_entry = NULL; - mm_walk.test_walk = NULL; - mm_walk.hugetlb_entry = NULL; - mm_walk.pmd_entry = hmm_vma_walk_pmd; - mm_walk.pte_hole = hmm_vma_walk_hole; - - walk_page_range(range->start, range->end, &mm_walk); + spin_unlock(ptl); return 0; } -EXPORT_SYMBOL(hmm_vma_get_pfns); +#else +#define hmm_vma_walk_hugetlb_entry NULL +#endif /* CONFIG_HUGETLB_PAGE */ -/* - * hmm_vma_range_done() - stop tracking change to CPU page table over a range - * @range: range being tracked - * Returns: false if range data has been invalidated, true otherwise - * - * Range struct is used to track updates to the CPU page table after a call to - * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done - * using the data, or wants to lock updates to the data it got from those - * functions, it must call the hmm_vma_range_done() function, which will then - * stop tracking CPU page table updates. - * - * Note that device driver must still implement general CPU page table update - * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using - * the mmu_notifier API directly. 
- * - * CPU page table update tracking done through hmm_range is only temporary and - * to be used while trying to duplicate CPU page table contents for a range of - * virtual addresses. - * - * There are two ways to use this : - * again: - * hmm_vma_get_pfns(range); or hmm_vma_fault(...); - * trans = device_build_page_table_update_transaction(pfns); - * device_page_table_lock(); - * if (!hmm_vma_range_done(range)) { - * device_page_table_unlock(); - * goto again; - * } - * device_commit_transaction(trans); - * device_page_table_unlock(); - * - * Or: - * hmm_vma_get_pfns(range); or hmm_vma_fault(...); - * device_page_table_lock(); - * hmm_vma_range_done(range); - * device_update_page_table(range->pfns); - * device_page_table_unlock(); - */ -bool hmm_vma_range_done(struct hmm_range *range) +static int hmm_vma_walk_test(unsigned long start, unsigned long end, + struct mm_walk *walk) { - unsigned long npages = (range->end - range->start) >> PAGE_SHIFT; - struct hmm *hmm; + struct hmm_vma_walk *hmm_vma_walk = walk->private; + struct hmm_range *range = hmm_vma_walk->range; + struct vm_area_struct *vma = walk->vma; - if (range->end <= range->start) { - BUG(); - return false; - } + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) && + vma->vm_flags & VM_READ) + return 0; - hmm = hmm_register(range->vma->vm_mm); - if (!hmm) { - memset(range->pfns, 0, sizeof(*range->pfns) * npages); - return false; - } + /* + * vma ranges that don't have struct page backing them or map I/O + * devices directly cannot be handled by hmm_range_fault(). + * + * If the vma does not allow read access, then assume that it does not + * allow write access either. HMM does not support architectures that + * allow write without read. + * + * If a fault is requested for an unsupported range then it is a hard + * failure. + */ + if (hmm_range_need_fault(hmm_vma_walk, + range->hmm_pfns + + ((start - range->start) >> PAGE_SHIFT), + (end - start) >> PAGE_SHIFT, 0)) + return -EFAULT; - spin_lock(&hmm->lock); - list_del_rcu(&range->list); - spin_unlock(&hmm->lock); + hmm_pfns_fill(start, end, range, HMM_PFN_ERROR); - return range->valid; + /* Skip this vma and continue processing the next vma. */ + return 1; } -EXPORT_SYMBOL(hmm_vma_range_done); -/* - * hmm_vma_fault() - try to fault some address in a virtual address range - * @range: range being faulted - * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem) - * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem have been drop) - * - * This is similar to a regular CPU page fault except that it will not trigger - * any memory migration if the memory being faulted is not accessible by CPUs. - * - * On error, for one virtual address in the range, the function will mark the - * corresponding HMM pfn entry with an error flag. 
+static const struct mm_walk_ops hmm_walk_ops = { + .pud_entry = hmm_vma_walk_pud, + .pmd_entry = hmm_vma_walk_pmd, + .pte_hole = hmm_vma_walk_hole, + .hugetlb_entry = hmm_vma_walk_hugetlb_entry, + .test_walk = hmm_vma_walk_test, + .walk_lock = PGWALK_RDLOCK, +}; + +/** + * hmm_range_fault - try to fault some address in a virtual address range + * @range: argument structure * - * Expected use pattern: - * retry: - * down_read(&mm->mmap_sem); - * // Find vma and address device wants to fault, initialize hmm_pfn_t - * // array accordingly - * ret = hmm_vma_fault(range, write, block); - * switch (ret) { - * case -EAGAIN: - * hmm_vma_range_done(range); - * // You might want to rate limit or yield to play nicely, you may - * // also commit any valid pfn in the array assuming that you are - * // getting true from hmm_vma_range_monitor_end() - * goto retry; - * case 0: - * break; - * case -ENOMEM: - * case -EINVAL: - * case -EPERM: - * default: - * // Handle error ! - * up_read(&mm->mmap_sem) - * return; - * } - * // Take device driver lock that serialize device page table update - * driver_lock_device_page_table_update(); - * hmm_vma_range_done(range); - * // Commit pfns we got from hmm_vma_fault() - * driver_unlock_device_page_table_update(); - * up_read(&mm->mmap_sem) + * Returns 0 on success or one of the following error codes: * - * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURN SUCCESS (0) - * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION ! + * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma + * (e.g., device file vma). + * -ENOMEM: Out of memory. + * -EPERM: Invalid permission (e.g., asking for write and range is read + * only). + * -EBUSY: The range has been invalidated and the caller needs to wait for + * the invalidation to finish. + * -EFAULT: A page was requested to be valid and could not be made valid + * ie it has no backing VMA or it is illegal to access * - * YOU HAVE BEEN WARNED ! + * This is similar to get_user_pages(), except that it can read the page tables + * without mutating them (ie causing faults). */ -int hmm_vma_fault(struct hmm_range *range, bool block) +int hmm_range_fault(struct hmm_range *range) { - struct vm_area_struct *vma = range->vma; - unsigned long start = range->start; - struct hmm_vma_walk hmm_vma_walk; - struct mm_walk mm_walk; - struct hmm *hmm; + struct hmm_vma_walk hmm_vma_walk = { + .range = range, + .last = range->start, + }; + struct mm_struct *mm = range->notifier->mm; int ret; - /* Sanity check, this really should not happen ! */ - if (range->start < vma->vm_start || range->start >= vma->vm_end) - return -EINVAL; - if (range->end < vma->vm_start || range->end > vma->vm_end) - return -EINVAL; + mmap_assert_locked(mm); - hmm = hmm_register(vma->vm_mm); - if (!hmm) { - hmm_pfns_clear(range, range->pfns, range->start, range->end); - return -ENOMEM; - } - /* Caller must have registered a mirror using hmm_mirror_register() */ - if (!hmm->mmu_notifier.ops) - return -EINVAL; - - /* FIXME support hugetlb fs */ - if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || - vma_is_dax(vma)) { - hmm_pfns_special(range); - return -EINVAL; - } - - if (!(vma->vm_flags & VM_READ)) { + do { + /* If range is no longer valid force retry. 
*/ + if (mmu_interval_check_retry(range->notifier, + range->notifier_seq)) + return -EBUSY; + ret = walk_page_range(mm, hmm_vma_walk.last, range->end, + &hmm_walk_ops, &hmm_vma_walk); /* - * If vma do not allow read access, then assume that it does - * not allow write access, either. Architecture that allow - * write without read access are not supported by HMM, because - * operations such has atomic access would not work. + * When -EBUSY is returned the loop restarts with + * hmm_vma_walk.last set to an address that has not been stored + * in pfns. All entries < last in the pfn array are set to their + * output, and all >= are still at their input values. */ - hmm_pfns_clear(range, range->pfns, range->start, range->end); - return -EPERM; - } - - /* Initialize range to track CPU page table update */ - spin_lock(&hmm->lock); - range->valid = true; - list_add_rcu(&range->list, &hmm->ranges); - spin_unlock(&hmm->lock); - - hmm_vma_walk.fault = true; - hmm_vma_walk.block = block; - hmm_vma_walk.range = range; - mm_walk.private = &hmm_vma_walk; - hmm_vma_walk.last = range->start; - - mm_walk.vma = vma; - mm_walk.mm = vma->vm_mm; - mm_walk.pte_entry = NULL; - mm_walk.test_walk = NULL; - mm_walk.hugetlb_entry = NULL; - mm_walk.pmd_entry = hmm_vma_walk_pmd; - mm_walk.pte_hole = hmm_vma_walk_hole; - - do { - ret = walk_page_range(start, range->end, &mm_walk); - start = hmm_vma_walk.last; - } while (ret == -EAGAIN); - - if (ret) { - unsigned long i; - - i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; - hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last, - range->end); - hmm_vma_range_done(range); - } + } while (ret == -EBUSY); return ret; } -EXPORT_SYMBOL(hmm_vma_fault); -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ - - -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) -struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, - unsigned long addr) -{ - struct page *page; - - page = alloc_page_vma(GFP_HIGHUSER, vma, addr); - if (!page) - return NULL; - lock_page(page); - return page; -} -EXPORT_SYMBOL(hmm_vma_alloc_locked_page); +EXPORT_SYMBOL(hmm_range_fault); - -static void hmm_devmem_ref_release(struct percpu_ref *ref) -{ - struct hmm_devmem *devmem; - - devmem = container_of(ref, struct hmm_devmem, ref); - complete(&devmem->completion); -} - -static void hmm_devmem_ref_exit(void *data) +/** + * hmm_dma_map_alloc - Allocate HMM map structure + * @dev: device to allocate structure for + * @map: HMM map to allocate + * @nr_entries: number of entries in the map + * @dma_entry_size: size of the DMA entry in the map + * + * Allocate the HMM map structure and all the lists it contains. + * Return 0 on success, -ENOMEM on failure. + */ +int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map, + size_t nr_entries, size_t dma_entry_size) { - struct percpu_ref *ref = data; - struct hmm_devmem *devmem; + bool dma_need_sync = false; + bool use_iova; - devmem = container_of(ref, struct hmm_devmem, ref); - wait_for_completion(&devmem->completion); - percpu_ref_exit(ref); -} + WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size)); -static void hmm_devmem_ref_kill(struct percpu_ref *ref) -{ - percpu_ref_kill(ref); -} + /* + * The HMM API violates our normal DMA buffer ownership rules and can't + * transfer buffer ownership. The dma_addressing_limited() check is a + * best approximation to ensure no swiotlb buffering happens. 
+ */ +#ifdef CONFIG_DMA_NEED_SYNC + dma_need_sync = !dev->dma_skip_sync; +#endif /* CONFIG_DMA_NEED_SYNC */ + if (dma_need_sync || dma_addressing_limited(dev)) + return -EOPNOTSUPP; + + map->dma_entry_size = dma_entry_size; + map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list), + GFP_KERNEL | __GFP_NOWARN); + if (!map->pfn_list) + return -ENOMEM; -static int hmm_devmem_fault(struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp) -{ - struct hmm_devmem *devmem = page->pgmap->data; + use_iova = dma_iova_try_alloc(dev, &map->state, 0, + nr_entries * PAGE_SIZE); + if (!use_iova && dma_need_unmap(dev)) { + map->dma_list = kvcalloc(nr_entries, sizeof(*map->dma_list), + GFP_KERNEL | __GFP_NOWARN); + if (!map->dma_list) + goto err_dma; + } + return 0; - return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp); +err_dma: + kvfree(map->pfn_list); + return -ENOMEM; } +EXPORT_SYMBOL_GPL(hmm_dma_map_alloc); -static void hmm_devmem_free(struct page *page, void *data) +/** + * hmm_dma_map_free - Free HMM map structure + * @dev: device to free structure from + * @map: HMM map containing the various lists and state + * + * Free the HMM map structure and all the lists it contains. + */ +void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map) { - struct hmm_devmem *devmem = data; - - page->mapping = NULL; - - devmem->ops->free(devmem, page); + if (dma_use_iova(&map->state)) + dma_iova_free(dev, &map->state); + kvfree(map->pfn_list); + kvfree(map->dma_list); } +EXPORT_SYMBOL_GPL(hmm_dma_map_free); -/* - * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory +/** + * hmm_dma_map_pfn - Map a physical HMM page to DMA address + * @dev: Device to map the page for + * @map: HMM map + * @idx: Index into the PFN and dma address arrays + * @p2pdma_state: PCI P2P state. * - * @ops: memory event device driver callback (see struct hmm_devmem_ops) - * @device: device struct to bind the resource too - * @size: size in bytes of the device memory to add - * Returns: pointer to new hmm_devmem struct ERR_PTR otherwise - * - * This function first finds an empty range of physical address big enough to - * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which - * in turn allocates struct pages. It does not do anything beyond that; all - * events affecting the memory will go through the various callbacks provided - * by hmm_devmem_ops struct. - * - * Device driver should call this function during device initialization and - * is then responsible of memory management. HMM only provides helpers. + * dma_alloc_iova() allocates IOVA based on the size specified by its use in + * iova->size. Call this function after IOVA allocation to link the whole @page + * and get the DMA address. Note that the very first call to this function + * will have @offset set to 0 in the IOVA space allocated from + * dma_alloc_iova(). For subsequent calls to this function on the same @iova, + * @offset needs to be advanced by the caller by the size of the previous + * page that was linked by this function. 
*/ -struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, - struct device *device, - unsigned long size) -{ - struct hmm_devmem *devmem; - resource_size_t addr; - void *result; +dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map, + size_t idx, + struct pci_p2pdma_map_state *p2pdma_state) +{ + struct dma_iova_state *state = &map->state; + dma_addr_t *dma_addrs = map->dma_list; + unsigned long *pfns = map->pfn_list; + struct page *page = hmm_pfn_to_page(pfns[idx]); + phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]); + size_t offset = idx * map->dma_entry_size; + unsigned long attrs = 0; + dma_addr_t dma_addr; int ret; - dev_pagemap_get_ops(); - - devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); - if (!devmem) - return ERR_PTR(-ENOMEM); - - init_completion(&devmem->completion); - devmem->pfn_first = -1UL; - devmem->pfn_last = -1UL; - devmem->resource = NULL; - devmem->device = device; - devmem->ops = ops; - - ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, - 0, GFP_KERNEL); - if (ret) - return ERR_PTR(ret); + if ((pfns[idx] & HMM_PFN_DMA_MAPPED) && + !(pfns[idx] & HMM_PFN_P2PDMA_BUS)) { + /* + * We are in this flow when there is a need to resync flags, + * for example when page was already linked in prefetch call + * with READ flag and now we need to add WRITE flag + * + * This page was already programmed to HW and we don't want/need + * to unlink and link it again just to resync flags. + */ + if (dma_use_iova(state)) + return state->addr + offset; - ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref); - if (ret) - return ERR_PTR(ret); + /* + * Without dma_need_unmap, the dma_addrs array is NULL, thus we + * need to regenerate the address below even if there already + * was a mapping. But !dma_need_unmap implies that the + * mapping stateless, so this is fine. + */ + if (dma_need_unmap(dev)) + return dma_addrs[idx]; - size = ALIGN(size, PA_SECTION_SIZE); - addr = min((unsigned long)iomem_resource.end, - (1UL << MAX_PHYSMEM_BITS) - 1); - addr = addr - size + 1UL; + /* Continue to remapping */ + } - /* - * FIXME add a new helper to quickly walk resource tree and find free - * range - * - * FIXME what about ioport_resource resource ? 
- */ - for (; addr > size && addr >= iomem_resource.start; addr -= size) { - ret = region_intersects(addr, size, 0, IORES_DESC_NONE); - if (ret != REGION_DISJOINT) - continue; - - devmem->resource = devm_request_mem_region(device, addr, size, - dev_name(device)); - if (!devmem->resource) - return ERR_PTR(-ENOMEM); + switch (pci_p2pdma_state(p2pdma_state, dev, page)) { + case PCI_P2PDMA_MAP_NONE: + break; + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + attrs |= DMA_ATTR_MMIO; + pfns[idx] |= HMM_PFN_P2PDMA; break; + case PCI_P2PDMA_MAP_BUS_ADDR: + pfns[idx] |= HMM_PFN_P2PDMA_BUS | HMM_PFN_DMA_MAPPED; + return pci_p2pdma_bus_addr_map(p2pdma_state->mem, paddr); + default: + return DMA_MAPPING_ERROR; } - if (!devmem->resource) - return ERR_PTR(-ERANGE); - - devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; - devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; - devmem->pfn_last = devmem->pfn_first + - (resource_size(devmem->resource) >> PAGE_SHIFT); - devmem->page_fault = hmm_devmem_fault; - - devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; - devmem->pagemap.res = *devmem->resource; - devmem->pagemap.page_free = hmm_devmem_free; - devmem->pagemap.altmap_valid = false; - devmem->pagemap.ref = &devmem->ref; - devmem->pagemap.data = devmem; - devmem->pagemap.kill = hmm_devmem_ref_kill; - - result = devm_memremap_pages(devmem->device, &devmem->pagemap); - if (IS_ERR(result)) - return result; - return devmem; -} -EXPORT_SYMBOL_GPL(hmm_devmem_add); - -struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, - struct device *device, - struct resource *res) -{ - struct hmm_devmem *devmem; - void *result; - int ret; - - if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) - return ERR_PTR(-EINVAL); - - dev_pagemap_get_ops(); - - devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); - if (!devmem) - return ERR_PTR(-ENOMEM); - - init_completion(&devmem->completion); - devmem->pfn_first = -1UL; - devmem->pfn_last = -1UL; - devmem->resource = res; - devmem->device = device; - devmem->ops = ops; - - ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, - 0, GFP_KERNEL); - if (ret) - return ERR_PTR(ret); - - ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, - &devmem->ref); - if (ret) - return ERR_PTR(ret); - - devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; - devmem->pfn_last = devmem->pfn_first + - (resource_size(devmem->resource) >> PAGE_SHIFT); - devmem->page_fault = hmm_devmem_fault; - - devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; - devmem->pagemap.res = *devmem->resource; - devmem->pagemap.page_free = hmm_devmem_free; - devmem->pagemap.altmap_valid = false; - devmem->pagemap.ref = &devmem->ref; - devmem->pagemap.data = devmem; - devmem->pagemap.kill = hmm_devmem_ref_kill; - - result = devm_memremap_pages(devmem->device, &devmem->pagemap); - if (IS_ERR(result)) - return result; - return devmem; -} -EXPORT_SYMBOL_GPL(hmm_devmem_add_resource); -/* - * A device driver that wants to handle multiple devices memory through a - * single fake device can use hmm_device to do so. This is purely a helper - * and it is not needed to make use of any HMM functionality. 
- */ -#define HMM_DEVICE_MAX 256 + if (dma_use_iova(state)) { + ret = dma_iova_link(dev, state, paddr, offset, + map->dma_entry_size, DMA_BIDIRECTIONAL, + attrs); + if (ret) + goto error; + + ret = dma_iova_sync(dev, state, offset, map->dma_entry_size); + if (ret) { + dma_iova_unlink(dev, state, offset, map->dma_entry_size, + DMA_BIDIRECTIONAL, attrs); + goto error; + } -static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX); -static DEFINE_SPINLOCK(hmm_device_lock); -static struct class *hmm_device_class; -static dev_t hmm_device_devt; + dma_addr = state->addr + offset; + } else { + if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs)) + goto error; -static void hmm_device_release(struct device *device) -{ - struct hmm_device *hmm_device; + dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size, + DMA_BIDIRECTIONAL, attrs); + if (dma_mapping_error(dev, dma_addr)) + goto error; - hmm_device = container_of(device, struct hmm_device, device); - spin_lock(&hmm_device_lock); - clear_bit(hmm_device->minor, hmm_device_mask); - spin_unlock(&hmm_device_lock); + if (dma_need_unmap(dev)) + dma_addrs[idx] = dma_addr; + } + pfns[idx] |= HMM_PFN_DMA_MAPPED; + return dma_addr; +error: + pfns[idx] &= ~HMM_PFN_P2PDMA; + return DMA_MAPPING_ERROR; - kfree(hmm_device); } +EXPORT_SYMBOL_GPL(hmm_dma_map_pfn); -struct hmm_device *hmm_device_new(void *drvdata) +/** + * hmm_dma_unmap_pfn - Unmap a physical HMM page from DMA address + * @dev: Device to unmap the page from + * @map: HMM map + * @idx: Index of the PFN to unmap + * + * Returns true if the PFN was mapped and has been unmapped, false otherwise. + */ +bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx) { - struct hmm_device *hmm_device; - - hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL); - if (!hmm_device) - return ERR_PTR(-ENOMEM); - - spin_lock(&hmm_device_lock); - hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX); - if (hmm_device->minor >= HMM_DEVICE_MAX) { - spin_unlock(&hmm_device_lock); - kfree(hmm_device); - return ERR_PTR(-EBUSY); - } - set_bit(hmm_device->minor, hmm_device_mask); - spin_unlock(&hmm_device_lock); - - dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor); - hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt), - hmm_device->minor); - hmm_device->device.release = hmm_device_release; - dev_set_drvdata(&hmm_device->device, drvdata); - hmm_device->device.class = hmm_device_class; - device_initialize(&hmm_device->device); - - return hmm_device; -} -EXPORT_SYMBOL(hmm_device_new); + const unsigned long valid_dma = HMM_PFN_VALID | HMM_PFN_DMA_MAPPED; + struct dma_iova_state *state = &map->state; + dma_addr_t *dma_addrs = map->dma_list; + unsigned long *pfns = map->pfn_list; + unsigned long attrs = 0; -void hmm_device_put(struct hmm_device *hmm_device) -{ - put_device(&hmm_device->device); -} -EXPORT_SYMBOL(hmm_device_put); + if ((pfns[idx] & valid_dma) != valid_dma) + return false; -static int __init hmm_init(void) -{ - int ret; + if (pfns[idx] & HMM_PFN_P2PDMA) + attrs |= DMA_ATTR_MMIO; - ret = alloc_chrdev_region(&hmm_device_devt, 0, - HMM_DEVICE_MAX, - "hmm_device"); - if (ret) - return ret; + if (pfns[idx] & HMM_PFN_P2PDMA_BUS) + ; /* no need to unmap bus address P2P mappings */ + else if (dma_use_iova(state)) + dma_iova_unlink(dev, state, idx * map->dma_entry_size, + map->dma_entry_size, DMA_BIDIRECTIONAL, attrs); + else if (dma_need_unmap(dev)) + dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size, + DMA_BIDIRECTIONAL, attrs); - hmm_device_class = 
class_create(THIS_MODULE, "hmm_device"); - if (IS_ERR(hmm_device_class)) { - unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX); - return PTR_ERR(hmm_device_class); - } - return 0; + pfns[idx] &= + ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS); + return true; } - -device_initcall(hmm_init); -#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ +EXPORT_SYMBOL_GPL(hmm_dma_unmap_pfn); |
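Editor's note: the kernel-doc for hmm_range_fault() in this diff implies a specific calling pattern that replaces the removed hmm_vma_range_done() tracking: snapshot the range under mmap_read_lock(), retry on -EBUSY, then validate the snapshot against the mmu_interval_notifier sequence before committing it to the device page table. Below is a minimal sketch of that pattern following Documentation/mm/hmm.rst; it is illustrative only and not part of this commit, and struct driver_data, drv->update_lock, and driver_update_device_ptes() are hypothetical driver-side names.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

struct driver_data {
	struct mutex update_lock;	/* serializes device page table updates */
};

/* Hypothetical: commit a validated snapshot to the device page table. */
static int driver_update_device_ptes(struct driver_data *drv,
				     const struct hmm_range *range);

static int driver_populate_range(struct driver_data *drv,
				 struct mmu_interval_notifier *mni,
				 unsigned long start, unsigned long end,
				 unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier	= mni,
		.start		= start,
		.end		= end,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

	if (!mmget_not_zero(mni->mm))
		return -EFAULT;
again:
	range.notifier_seq = mmu_interval_read_begin(mni);
	mmap_read_lock(mni->mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mni->mm);
	if (ret) {
		if (ret == -EBUSY)	/* collided with an invalidation */
			goto again;
		goto out_put;
	}

	mutex_lock(&drv->update_lock);
	if (mmu_interval_read_retry(mni, range.notifier_seq)) {
		/* Invalidated between snapshot and commit: start over. */
		mutex_unlock(&drv->update_lock);
		goto again;
	}
	ret = driver_update_device_ptes(drv, &range);
	mutex_unlock(&drv->update_lock);
out_put:
	mmput(mni->mm);
	return ret;
}

The mmu_interval_read_begin()/mmu_interval_read_retry() pair is what makes the snapshot safe to commit: any CPU page table invalidation between the two calls forces another pass through the loop.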
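The hmm_dma_* helpers added at the end of the diff layer DMA mapping on top of such a snapshot: the pfn_list inside struct hmm_dma_map doubles as the range's hmm_pfns array, and each populated entry can then be mapped with hmm_dma_map_pfn(). A rough lifecycle sketch, under the same caveat (illustrative only; the -EIO error code and the device-programming step are placeholders):

#include <linux/hmm-dma.h>
#include <linux/pci-p2pdma.h>

static int driver_dma_map_range(struct device *dev, struct hmm_dma_map *map,
				size_t nr_pages)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	size_t i;
	int ret;

	/* May fail with -EOPNOTSUPP if swiotlb bounce buffering could occur. */
	ret = hmm_dma_map_alloc(dev, map, nr_pages, PAGE_SIZE);
	if (ret)
		return ret;

	/*
	 * ... populate map->pfn_list here, e.g. by running the
	 * hmm_range_fault() loop above with range.hmm_pfns = map->pfn_list ...
	 */

	for (i = 0; i < nr_pages; i++) {
		dma_addr_t dma_addr = hmm_dma_map_pfn(dev, map, i,
						      &p2pdma_state);

		if (dma_addr == DMA_MAPPING_ERROR)
			goto err_unmap;
		/* ... program dma_addr into the device ... */
	}
	return 0;

err_unmap:
	while (i--)
		hmm_dma_unmap_pfn(dev, map, i);
	hmm_dma_map_free(dev, map);
	return -EIO;
}

On teardown the same pair is used in reverse: hmm_dma_unmap_pfn() for each mapped index, then hmm_dma_map_free() once. Note that hmm_dma_map_alloc() deliberately refuses devices that might need swiotlb or DMA sync, per the ownership comment in its body.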
