author	Linus Torvalds <torvalds@linux-foundation.org>	2022-11-09 12:30:50 -0800
committer	Andrew Morton <akpm@linux-foundation.org>	2022-11-30 15:58:50 -0800
commit	7cc8f9c7146a5c2dad6e71653c4f69972e73df6b (patch)
tree	c5ef2891eb00057626c9039cd826cda6de683419 /mm/mmu_gather.c
parent	449c796768c9a1c738d1fa8671fb01663380b8a7 (diff)
mm: mmu_gather: prepare to gather encoded page pointers with flags
This is purely a preparatory patch that makes all the data structures ready
for encoding flags with the mmu_gather page pointers.

The code currently always sets the flag to zero and doesn't use it yet, but
now it's tracking the type state along.  The next step will be to actually
start using it.

Link: https://lkml.kernel.org/r/20221109203051.1835763-3-torvalds@linux-foundation.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
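The "encoded page pointer" being prepared for here is the classic pointer-tagging trick: struct page pointers are at least word aligned, so their low bit(s) are always zero and can carry a small flag without growing the batch array. Below is a minimal, self-contained userspace sketch of that general technique only; the helper names (encode_ptr, encoded_ptr_ptr, encoded_ptr_flags) and the one-bit mask are illustrative assumptions, not the kernel's struct encoded_page API.

	/*
	 * Sketch of tagging a flag into the low bit of an aligned pointer.
	 * Illustrative names only; not the kernel's encoded_page helpers.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	struct encoded_ptr;			/* opaque: pointer with a flag in its low bit */

	#define ENCODE_FLAG_MASK	1ul	/* assume one flag bit for this sketch */

	static struct encoded_ptr *encode_ptr(void *p, unsigned long flags)
	{
		assert(((uintptr_t)p & ENCODE_FLAG_MASK) == 0);	/* pointer must be aligned */
		assert((flags & ~ENCODE_FLAG_MASK) == 0);	/* flags must fit in the spare bit */
		return (struct encoded_ptr *)((uintptr_t)p | flags);
	}

	static void *encoded_ptr_ptr(struct encoded_ptr *ep)
	{
		return (void *)((uintptr_t)ep & ~ENCODE_FLAG_MASK);	/* strip the tag */
	}

	static unsigned long encoded_ptr_flags(struct encoded_ptr *ep)
	{
		return (uintptr_t)ep & ENCODE_FLAG_MASK;		/* read the tag */
	}

	int main(void)
	{
		int page;					/* stand-in for a struct page */
		struct encoded_ptr *ep = encode_ptr(&page, 0);	/* flag is still always zero, as in this patch */

		printf("ptr=%p flags=%lu\n", encoded_ptr_ptr(ep), encoded_ptr_flags(ep));
		return 0;
	}

The payoff of folding the flag into the pointer itself is that the per-batch array stays the same size and the hot paths keep passing a single word around.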
Diffstat (limited to 'mm/mmu_gather.c')
-rw-r--r--	mm/mmu_gather.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 3a2c3f8cad2f..382581c4a9f6 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -48,7 +48,7 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
 	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		struct page **pages = batch->pages;
+		struct encoded_page **pages = batch->encoded_pages;
 
 		do {
 			/*
@@ -77,7 +77,7 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
 	tlb->local.next = NULL;
 }
 
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
 {
 	struct mmu_gather_batch *batch;
 
@@ -92,13 +92,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	 * Add the page and check if we are full. If so
 	 * force a flush.
 	 */
-	batch->pages[batch->nr++] = page;
+	batch->encoded_pages[batch->nr++] = page;
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
 			return true;
 		batch = tlb->active;
 	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+	VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));
 
 	return false;
 }
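Taken together, the hunks show the shape the rest of the series relies on: the add path (__tlb_remove_page_size()) stores tagged pointers into batch->encoded_pages[], and any consumer that needs the bare struct page, such as the VM_BUG_ON_PAGE() check, must strip the tag first via encoded_page_ptr(). Here is a compact, self-contained sketch of that add/flush pattern under the same one-bit-tag assumption; batch_add and batch_flush are illustrative stand-ins, not the kernel's functions.

	/*
	 * Sketch of the batching pattern: a fixed array of tagged pointers is
	 * filled by an "add" path and drained by a "flush" path that decodes
	 * each entry before using it. Not the kernel's mmu_gather code.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BATCH_MAX	8
	#define FLAG_MASK	1ul

	struct batch {
		unsigned int nr;
		uintptr_t entries[BATCH_MAX];	/* pointer | flag, like encoded_pages[] */
	};

	/* Mirrors the add side: returns true when the batch is full. */
	static bool batch_add(struct batch *b, void *p, unsigned long flag)
	{
		b->entries[b->nr++] = (uintptr_t)p | (flag & FLAG_MASK);
		return b->nr == BATCH_MAX;
	}

	/* Mirrors the flush side: decode each entry before acting on it. */
	static void batch_flush(struct batch *b)
	{
		for (unsigned int i = 0; i < b->nr; i++) {
			void *p = (void *)(b->entries[i] & ~FLAG_MASK);
			unsigned long flag = b->entries[i] & FLAG_MASK;

			printf("entry %u: ptr=%p flag=%lu\n", i, p, flag);
		}
		b->nr = 0;
	}

	int main(void)
	{
		static int fake_pages[3];	/* stand-ins for struct page */
		struct batch b = { 0 };

		for (int i = 0; i < 3; i++)
			if (batch_add(&b, &fake_pages[i], 0))	/* flag is still always zero here */
				batch_flush(&b);
		batch_flush(&b);
		return 0;
	}

As in the patch itself, the flag passed in is still always zero; only the storage type and the decode points change, which is exactly the "prepare" step the commit message describes.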