author	Jérôme Glisse <jglisse@redhat.com>	2018-12-28 00:38:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 12:11:50 -0800
commit	5d6527a784f7a6d247961e046e830de8d71b47d1 (patch)
tree	9326a7abb7aab8983770840a4d52e07becb76c6d /mm
parent	b15c87263a69272423771118c653e9a1d0672caa (diff)
mm/mmu_notifier: use structure for invalidate_range_start/end callback
Patch series "mmu notifier contextual informations", v2.

This patchset adds contextual information, describing why an invalidation is happening, to the mmu notifier callbacks. This is necessary for users of mmu notifiers that wish to maintain their own data structures without having to add new fields to struct vm_area_struct (vma). For instance, a device can have its own page table that mirrors the process address space. When a vma is unmapped (munmap() syscall), the device driver can free the device page table for the range. Today we do not have any information on why an mmu notifier callback is happening, so a device driver has to assume that it is always an munmap(). This is inefficient, as it means the driver must re-allocate the device page table on the next page fault and rebuild the whole device driver data structure for the range.

Other use cases besides munmap() also exist. For instance, it is pointless for a device driver to invalidate the device page table when the invalidation is only for soft-dirty tracking, and the driver can optimize away an mprotect() that merely changes page table access permissions for the range. This patchset enables all of these optimizations for device drivers. I do not include any of them in this series, but another patchset I am posting will leverage this.

The patchset is pretty simple from a code point of view. The first two patches consolidate all mmu notifier arguments into a struct so that it is easier to add or change arguments. The last patch adds the contextual information (munmap, protection, soft dirty, clear, ...).

This patch (of 3):

To avoid having to change many callback definitions every time we want to add a parameter, use a structure to group all parameters for the mmu_notifier invalidate_range_start/end callbacks. There are no functional changes in this patch.

[akpm@linux-foundation.org: fix drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c kerneldoc]
Link: http://lkml.kernel.org/r/20181205053628.3210-2-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Jason Gunthorpe <jgg@mellanox.com> [infiniband]
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
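For reference, here is a minimal sketch of the consolidated structure this patch introduces, reconstructed from the fields assigned in the mm/ diff below; the authoritative definition lives in include/linux/mmu_notifier.h, which is outside the mm/ diffstat shown here:

struct mmu_notifier_range {
	struct mm_struct *mm;	/* address space being invalidated */
	unsigned long start;	/* start of the invalidated virtual range */
	unsigned long end;	/* end of the invalidated virtual range */
	bool blockable;		/* whether the callback may sleep */
};

Grouping the arguments this way is what lets the last patch of the series add its contextual-information field without touching every callback signature again.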
Diffstat (limited to 'mm')
-rw-r--r--	mm/hmm.c	23
-rw-r--r--	mm/mmu_notifier.c	21
2 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 361f3706962f..789587731217 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -189,35 +189,30 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct hmm_update update;
- struct hmm *hmm = mm->hmm;
+ struct hmm *hmm = range->mm->hmm;
VM_BUG_ON(!hmm);
- update.start = start;
- update.end = end;
+ update.start = range->start;
+ update.end = range->end;
update.event = HMM_UPDATE_INVALIDATE;
- update.blockable = blockable;
+ update.blockable = range->blockable;
return hmm_invalidate_range(hmm, true, &update);
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct mmu_notifier_range *range)
{
struct hmm_update update;
- struct hmm *hmm = mm->hmm;
+ struct hmm *hmm = range->mm->hmm;
VM_BUG_ON(!hmm);
- update.start = start;
- update.end = end;
+ update.start = range->start;
+ update.end = range->end;
update.event = HMM_UPDATE_INVALIDATE;
update.blockable = true;
hmm_invalidate_range(hmm, false, &update);
}
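The same mechanical conversion applies to every other mmu notifier user in the tree. As an illustration of the pattern, the driver name and locking helpers below are hypothetical; only the two callback signatures come from this patch:

static int mydrv_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	/* hypothetical driver: honor the blockable flag from the range */
	if (!range->blockable && !mydrv_trylock(mn))
		return -EAGAIN;	/* tell the core we could not proceed */
	mydrv_drop_mapping(mn, range->start, range->end);
	mydrv_unlock(mn);
	return 0;
}

static void mydrv_invalidate_range_end(struct mmu_notifier *mn,
				       const struct mmu_notifier_range *range)
{
	/* start went through, so blocking is allowed here */
	mydrv_resync(mn, range->start, range->end);
}

Every per-range argument now travels through one pointer, which is why the next patch in the series can extend the struct without another tree-wide signature change.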
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 755466cd289a..74a7dc3d11c8 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -171,14 +171,20 @@ int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end,
bool blockable)
{
+ struct mmu_notifier_range _range, *range = &_range;
struct mmu_notifier *mn;
int ret = 0;
int id;
+ range->blockable = blockable;
+ range->start = start;
+ range->end = end;
+ range->mm = mm;
+
id = srcu_read_lock(&srcu);
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_start) {
- int _ret = mn->ops->invalidate_range_start(mn, mm, start, end, blockable);
+ int _ret = mn->ops->invalidate_range_start(mn, range);
if (_ret) {
pr_info("%pS callback failed with %d in %sblockable context.\n",
mn->ops->invalidate_range_start, _ret,
!blockable ? "non-" : "");
@@ -198,9 +204,20 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long end,
bool only_end)
{
+ struct mmu_notifier_range _range, *range = &_range;
struct mmu_notifier *mn;
int id;
+ /*
+ * The end callback will never be called if the start callback refused
+ * to go through because blockable was false, so here we can assume
+ * that we are allowed to block.
+ */
+ range->blockable = true;
+ range->start = start;
+ range->end = end;
+ range->mm = mm;
+
id = srcu_read_lock(&srcu);
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
/*
@@ -219,7 +236,7 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
if (!only_end && mn->ops->invalidate_range)
mn->ops->invalidate_range(mn, mm, start, end);
if (mn->ops->invalidate_range_end)
- mn->ops->invalidate_range_end(mn, mm, start, end);
+ mn->ops->invalidate_range_end(mn, range);
}
srcu_read_unlock(&srcu, id);
}
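A note on the shape of this change: the exported __mmu_notifier_invalidate_range_start/end entry points keep their scalar arguments and pack them into a struct on the stack, so none of their callers change in this patch; only the per-notifier callback signature does. A simplified sketch of the resulting call path, with the inline wrapper paraphrased from include/linux/mmu_notifier.h of this era rather than taken from this diff:

/* call site somewhere in mm/, unchanged by this patch */
mmu_notifier_invalidate_range_start(mm, start, end);

/* static inline wrapper (paraphrased): only take the slow path
 * when the mm actually has notifiers registered */
if (mm_has_notifiers(mm))
	__mmu_notifier_invalidate_range_start(mm, start, end, true);

/* slow path, this file: build the on-stack range once ... */
range->mm = mm; range->start = start; range->end = end;
/* ... and hand the same struct to every registered notifier */
mn->ops->invalidate_range_start(mn, range);

Building the struct once on the stack costs essentially nothing at runtime and keeps this patch purely mechanical, as the commit message promises.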