From be16dd764a69752a31096d1a6b2ad775b728b1bd Mon Sep 17 00:00:00 2001
From: Muhammad Muzammil
Date: Mon, 23 Oct 2023 17:44:05 +0500
Subject: mm: fix multiple typos in multiple files

Link: https://lkml.kernel.org/r/20231023124405.36981-1-m.muzzammilashraf@gmail.com
Signed-off-by: Muhammad Muzammil
Reviewed-by: Randy Dunlap
Cc: "James E.J. Bottomley"
Cc: Matthew Wilcox (Oracle)
Cc: Muhammad Muzammil
Signed-off-by: Andrew Morton
---
 mm/debug_vm_pgtable.c | 4 ++--
 mm/internal.h         | 2 +-
 mm/memcontrol.c       | 4 ++--
 mm/mmap.c             | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'mm')

diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 48e329ea5ba3..e651500e597a 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -1322,8 +1322,8 @@ static int __init debug_vm_pgtable(void)
 	 * true irrespective of the starting protection value for a
 	 * given page table entry.
 	 *
-	 * Protection based vm_flags combinatins are always linear
-	 * and increasing i.e starting from VM_NONE and going upto
+	 * Protection based vm_flags combinations are always linear
+	 * and increasing i.e starting from VM_NONE and going up to
 	 * (VM_SHARED | READ | WRITE | EXEC).
 	 */
 #define VM_FLAGS_START	(VM_NONE)
diff --git a/mm/internal.h b/mm/internal.h
index c61a98d3b3c7..3eceae1ec4c0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -592,7 +592,7 @@ extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
  * range.
  * "fully mapped" means all the pages of folio is associated with the page
  * table of range while this function just check whether the folio range is
- * within the range [start, end). Funcation caller nees to do page table
+ * within the range [start, end). Function caller needs to do page table
  * check if it cares about the page table association.
  *
  * Typical usage (like mlock or madvise) is:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8b0859b8cc03..774bd6e21e27 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -819,7 +819,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	memcg = pn->memcg;
 
 	/*
-	 * The caller from rmap relay on disabled preemption becase they never
+	 * The caller from rmap relies on disabled preemption because they never
 	 * update their counter from in-interrupt context. For these two
 	 * counters we check that the update is never performed from an
 	 * interrupt context while other caller need to have disabled interrupt.
@@ -8044,7 +8044,7 @@ static struct cftype memsw_files[] = {
  *
  * This doesn't check for specific headroom, and it is not atomic
  * either. But with zswap, the size of the allocation is only known
- * once compression has occured, and this optimistic pre-check avoids
+ * once compression has occurred, and this optimistic pre-check avoids
  * spending cycles on compression when there is already no room left
  * or zswap is disabled altogether somewhere in the hierarchy.
  */
diff --git a/mm/mmap.c b/mm/mmap.c
index 8b57e42fd980..984804d77ae1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1223,7 +1223,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
 	 *
 	 * (the exception is when the underlying filesystem is noexec
-	 * mounted, in which case we dont add PROT_EXEC.)
+	 * mounted, in which case we don't add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
 		if (!(file && path_noexec(&file->f_path)))
--
cgit