author	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-02-03 09:06:08 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:59:02 -0400
commit	eed05e54d275b3cfc5d8c79843c5276a5878e94a (patch)
tree	ec7411a6713c845154731e0cd66a55adc501e792 /mm/rmap.c
parent	f087b903fc2e4975bff9742a66ee7a837a2f545b (diff)
mm: Add DEFINE_PAGE_VMA_WALK and DEFINE_FOLIO_VMA_WALK
Instead of declaring a struct page_vma_mapped_walk directly, use these helpers to allow us to transition to a PFN approach in the following patches.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
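The helper definitions themselves live outside this diff (the change shown here is limited to mm/rmap.c); as a rough sketch, assuming the macros simply wrap the open-coded designated initializers being removed at each call site below, they would look roughly like:

	/* Sketch only: declare a page_vma_mapped_walk keyed by a struct page. */
	#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags)	\
		struct page_vma_mapped_walk name = {				\
			.page = _page,						\
			.vma = _vma,						\
			.address = _address,					\
			.flags = _flags,					\
		}

	/* Sketch only: folio variant, same walk state keyed by a folio. */
	#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)	\
		struct page_vma_mapped_walk name = {				\
			.page = &(_folio)->page,				\
			.vma = _vma,						\
			.address = _address,					\
			.flags = _flags,					\
		}

With helpers of this shape, a caller such as page_referenced_one() below can replace its five-line initializer with a single DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); line, and later patches can change what the walk is keyed on (a PFN range rather than a struct page) without touching every call site again.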
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	31
1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a13d5d6cfc7..a7f06b76b503 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -802,11 +802,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct page_referenced_arg *pra = arg;
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = address,
- };
+ DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
int referenced = 0;
while (page_vma_mapped_walk(&pvmw)) {
@@ -934,12 +930,7 @@ int page_referenced(struct page *page,
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = address,
- .flags = PVMW_SYNC,
- };
+ DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, PVMW_SYNC);
struct mmu_notifier_range range;
int *cleaned = arg;
@@ -1419,11 +1410,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = address,
- };
+ DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
pte_t pteval;
struct page *subpage;
bool ret = true;
@@ -1714,11 +1701,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = address,
- };
+ DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
pte_t pteval;
struct page *subpage;
bool ret = true;
@@ -2001,11 +1984,7 @@ static bool page_make_device_exclusive_one(struct page *page,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
struct mm_struct *mm = vma->vm_mm;
- struct page_vma_mapped_walk pvmw = {
- .page = page,
- .vma = vma,
- .address = address,
- };
+ DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
struct make_exclusive_args *args = priv;
pte_t pteval;
struct page *subpage;