author	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-03-09 13:48:03 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-10-18 07:49:41 -0400
commit	121703c1c817b3c77f61002466d0bfca7e39f25d (patch)
tree	e71f6e481eec64c9f194fb1203c424bd51b714dc
parent	b27652d935f41793c5e229a1e8b3a8bb3afe3cc1 (diff)
mm/writeback: Add folio_write_one
Transform write_one_page() into folio_write_one() and add a compatibility
wrapper.  Also move the declaration to pagemap.h as this is page cache
functionality that doesn't need to be used by the rest of the kernel.

Saves 58 bytes of kernel text.  While folio_write_one() is 101 bytes smaller
than write_one_page(), the inlined call to page_folio() expands each caller.
There are fewer than ten callers so it doesn't seem worth putting a wrapper
in the core.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Howells <dhowells@redhat.com>
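A minimal caller sketch (illustration only, not part of this patch; the helper
names example_sync_folio() and example_sync_page() are hypothetical): with the
compatibility wrapper in pagemap.h, existing callers that pass a struct page
keep working through write_one_page(), while code that already holds a folio
can call folio_write_one() directly and skip the inlined page_folio()
conversion mentioned above.  Both entry points expect the caller to hold the
lock and unlock before returning.

#include <linux/pagemap.h>

/* Hypothetical example: write out one locked folio synchronously.
 * folio_write_one() unlocks the folio, waits for the writeback it
 * starts, and returns 0 or a negative error code.
 */
static int example_sync_folio(struct folio *folio)
{
	folio_lock(folio);		/* folio_write_one() requires the lock */
	return folio_write_one(folio);	/* unlocks the folio on return */
}

/* The old entry point still works for struct page callers: */
static int example_sync_page(struct page *page)
{
	lock_page(page);
	return write_one_page(page);	/* wrapper: page_folio() + folio_write_one() */
}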
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/pagemap.h5
-rw-r--r--mm/page-writeback.c30
3 files changed, 20 insertions, 19 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index efcb06fc6116..40ff114aaf9e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2788,10 +2788,6 @@ extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b68b053e581f..013cdc90f5fd 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -803,6 +803,11 @@ static inline void cancel_dirty_page(struct page *page)
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
+int __must_check folio_write_one(struct folio *folio);
+static inline int __must_check write_one_page(struct page *page)
+{
+ return folio_write_one(page_folio(page));
+}
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a0c35a9d8029..9c64490171e0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2381,44 +2381,44 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
}
/**
- * write_one_page - write out a single page and wait on I/O
- * @page: the page to write
+ * folio_write_one - write out a single folio and wait on I/O.
+ * @folio: The folio to write.
*
- * The page must be locked by the caller and will be unlocked upon return.
+ * The folio must be locked by the caller and will be unlocked upon return.
*
* Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
* function returns.
*
* Return: %0 on success, negative error code otherwise
*/
-int write_one_page(struct page *page)
+int folio_write_one(struct folio *folio)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
int ret = 0;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
- .nr_to_write = 1,
+ .nr_to_write = folio_nr_pages(folio),
};
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- if (clear_page_dirty_for_io(page)) {
- get_page(page);
- ret = mapping->a_ops->writepage(page, &wbc);
+ if (folio_clear_dirty_for_io(folio)) {
+ folio_get(folio);
+ ret = mapping->a_ops->writepage(&folio->page, &wbc);
if (ret == 0)
- wait_on_page_writeback(page);
- put_page(page);
+ folio_wait_writeback(folio);
+ folio_put(folio);
} else {
- unlock_page(page);
+ folio_unlock(folio);
}
if (!ret)
ret = filemap_check_errors(mapping);
return ret;
}
-EXPORT_SYMBOL(write_one_page);
+EXPORT_SYMBOL(folio_write_one);
/*
* For address_spaces which do not use buffers nor write back.