author     Quanfa Fu <fuqf0919@gmail.com>                  2022-01-14 14:09:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-15 16:30:31 +0200
commit     0b8f0d870020dbd7037bfacbb73a9b3213470f90 (patch)
tree       d74c462f6a8f592e0d9d79db405777f5ca591b67 /mm
parent     7f0d267243aa9dd32944bd7d3b34afff60545edb (diff)
mm: fix some comment errors
Link: https://lkml.kernel.org/r/20211101040208.460810-1-fuqf0919@gmail.com
Signed-off-by: Quanfa Fu <fuqf0919@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/khugepaged.c      2
-rw-r--r--  mm/memory-failure.c  2
-rw-r--r--  mm/slab_common.c     2
-rw-r--r--  mm/swap.c            2
4 files changed, 4 insertions, 4 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 02071f213c58..7af84bac6fc2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1303,7 +1303,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
/*
* Record which node the original page is from and save this
* information to khugepaged_node_load[].
- * Khupaged will allocate hugepage from the node has the max
+ * Khugepaged will allocate hugepage from the node has the max
* hit record.
*/
node = page_to_nid(page);
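
The comment being fixed describes khugepaged's placement policy: while scanning a PMD it counts, per NUMA node, how many of the base pages live on that node, then allocates the collapsed hugepage on the node with the most hits. Below is a minimal user-space sketch of that selection step only; MAX_NUMNODES and the function name are chosen here purely for illustration (in the kernel the equivalent argmax over khugepaged_node_load[] is done by khugepaged_find_target_node()).

#define MAX_NUMNODES 8	/* illustration only; the kernel sizes this from config */

/*
 * Pick the node with the highest hit count, mirroring the policy in the
 * comment above: collapse to a hugepage on the node most of the scanned
 * pages came from.
 */
static int find_target_node(const int node_load[MAX_NUMNODES])
{
	int nid, target_node = 0, max_hits = 0;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (node_load[nid] > max_hits) {
			max_hits = node_load[nid];
			target_node = nid;
		}
	}
	return target_node;
}
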
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6a2b4b86b679..373837bb94cb 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1306,7 +1306,7 @@ static int __get_unpoison_page(struct page *page)
*
* get_hwpoison_page() takes a page refcount of an error page to handle memory
* error on it, after checking that the error page is in a well-defined state
- * (defined as a page-type we can successfully handle the memor error on it,
+ * (defined as a page-type we can successfully handle the memory error on it,
* such as LRU page and hugetlb page).
*
* Memory error handling could be triggered at any time on any type of page,
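
The surrounding comment documents a contract rather than an implementation: the error page is pinned only after it has been confirmed to be of a type the handler knows how to recover (LRU, hugetlb, and so on). A simplified sketch of that "check the state, then take a reference" pattern follows; struct page, page_kind and the refcount field here are stand-ins invented for the example, not kernel definitions.

#include <stdbool.h>

/* Illustrative stand-ins (not kernel types): a page with a type tag and
 * a refcount, enough to show the "check state, then pin" contract. */
enum page_kind { PAGE_LRU, PAGE_HUGETLB, PAGE_SLAB, PAGE_OTHER };

struct page {
	enum page_kind kind;
	int refcount;
};

/* Take a reference only when the page is of a type the error handler
 * knows how to deal with; otherwise leave it untouched. */
static bool get_error_page(struct page *p)
{
	switch (p->kind) {
	case PAGE_LRU:
	case PAGE_HUGETLB:
		p->refcount++;	/* pinned for the duration of handling */
		return true;
	default:
		return false;	/* not in a well-defined state: caller must give up */
	}
}
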
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1f75bd4e95d6..9513244457e6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -819,7 +819,7 @@ void __init setup_kmalloc_cache_index_table(void)
if (KMALLOC_MIN_SIZE >= 64) {
/*
- * The 96 byte size cache is not used if the alignment
+ * The 96 byte sized cache is not used if the alignment
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
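
For context on the comment itself: when the minimum kmalloc alignment is 64 bytes, a 96-byte cache cannot keep every object 64-byte aligned (96 is not a multiple of 64), so requests between 65 and 96 bytes are steered to the 128-byte cache instead. The self-contained sketch below shows that remapping with a simplified size-to-cache table, not the kernel's real size_index[] encoding.

#include <stdio.h>

#define KMALLOC_MIN_SIZE 64	/* assume 64-byte minimum alignment for this demo */

/* Simplified stand-in for the kernel's size_index[]: entry i gives the
 * cache size used for a request of (i + 1) * 8 bytes, for sizes 8..192. */
static unsigned int size_to_cache[24] = {
	8, 16, 32, 32, 64, 64, 64, 64,			/*   8 ..  64 */
	96, 96, 96, 96, 128, 128, 128, 128,		/*  72 .. 128 */
	192, 192, 192, 192, 192, 192, 192, 192,		/* 136 .. 192 */
};

/* Mirror of the fixed comment: with 64-byte alignment the 96-byte cache
 * is unusable, so 65..96-byte requests are bumped to the 128-byte cache. */
static void setup_size_table(void)
{
	unsigned int size;

	if (KMALLOC_MIN_SIZE >= 64)
		for (size = 64 + 8; size <= 96; size += 8)
			size_to_cache[size / 8 - 1] = 128;
}

int main(void)
{
	setup_size_table();
	printf("a 72-byte request uses the %u-byte cache\n",
	       size_to_cache[72 / 8 - 1]);	/* prints 128 after setup */
	return 0;
}
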
diff --git a/mm/swap.c b/mm/swap.c
index e8c9dc6d0377..b461814ce0cb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -882,7 +882,7 @@ void lru_cache_disable(void)
* all online CPUs so any calls of lru_cache_disabled wrapped by
* local_lock or preemption disabled would be ordered by that.
* The atomic operation doesn't need to have stronger ordering
- * requirements because that is enforeced by the scheduling
+ * requirements because that is enforced by the scheduling
* guarantees.
*/
__lru_add_drain_all(true);
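
The comment explains why the increment of the disable counter needs no ordering stronger than relaxed: the synchronize_rcu()/IPI step that follows already orders it against every reader that samples the counter with preemption disabled or under a local_lock. A compressed C11 sketch of that shape, assuming a user-space analogue of the counter; wait_for_all_cpus() is a placeholder stub standing in for the kernel's synchronize_rcu()/IPI broadcast, not a real API.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lru_disable_count;

/* Stand-in for synchronize_rcu()/IPIs on all online CPUs, stubbed out
 * here.  In the kernel, its completion is what orders the increment
 * below against every preemption-disabled reader. */
static void wait_for_all_cpus(void) { }

void lru_cache_disable(void)
{
	/* Relaxed is enough: ordering against readers comes from
	 * wait_for_all_cpus(), not from this RMW itself. */
	atomic_fetch_add_explicit(&lru_disable_count, 1, memory_order_relaxed);
	wait_for_all_cpus();
	/* ... then drain the per-CPU LRU caches, as __lru_add_drain_all(true)
	 * does in the hunk above ... */
}

bool lru_cache_disabled(void)
{
	/* Callers sample this with preemption disabled or under a
	 * local_lock; the broadcast above orders them against the add. */
	return atomic_load_explicit(&lru_disable_count, memory_order_relaxed) != 0;
}
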