author	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-16 12:13:31 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-16 12:13:31 +0200
commit	35219bc5c71f4197c8bd10297597de797c1eece5 (patch)
tree	2448156135b78f54cd341a8457ccd84a371ddac7 /lib/kunit_iov_iter.c
parent	9020d0d844ad58a051f90b1e5b82ba34123925b9 (diff)
parent	4b40d43d9f951d87ae8dc414c2ef5ae50303a266 (diff)
Merge tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull netfs updates from Christian Brauner:
 "This contains the work to improve read/write performance for the new
  netfs library.

  The main performance enhancing changes are:

   - Define a structure, struct folio_queue, and a new iterator type,
     ITER_FOLIOQ, to hold a buffer as a replacement for ITER_XARRAY. See
     that patch for questions about naming and form.

     ITER_FOLIOQ is provided as a replacement for ITER_XARRAY. The
     problem with an xarray is that accessing it requires the use of a
     lock (typically the RCU read lock) - and this means that we can't
     supply iterate_and_advance() with a step function that might sleep
     (crypto for example) without having to drop the lock between pages.
     ITER_FOLIOQ is the iterator for a chain of folio_queue structs,
     where each folio_queue holds a small list of folios.

     A folio_queue struct is a simpler structure than xarray and is not
     subject to concurrent manipulation by the VM. folio_queue is used
     rather than a bvec[] as it can form lists of indefinite size,
     adding to one end and removing from the other on the fly.

   - Provide a copy_folio_from_iter() wrapper.

   - Make cifs RDMA support ITER_FOLIOQ.

   - Use folio queues in the write-side helpers instead of xarrays.

   - Add a function to reset the iterator in a subrequest.

   - Simplify the write-side helpers to use sheaves to skip gaps rather
     than trying to work out where gaps are.

   - In afs, make the read subrequests asynchronous, putting them into
     work items to allow the next patch to do progressive
     unlocking/reading.

   - Overhaul the read-side helpers to improve performance.

   - Fix the caching of a partial block at the end of a file.

   - Allow a store to be cancelled.

  Then some changes for cifs to make it use folio queues instead of
  xarrays for crypto bufferage:

   - Use raw iteration functions rather than manually coding iteration
     when hashing data.

   - Switch to using folio_queue for crypto buffers.

   - Remove the xarray bits.

  Make some adjustments to the /proc/fs/netfs/stats file such that:

   - All the netfs stats lines begin 'Netfs:' but change this to
     something a bit more useful.

   - Add a couple of stats counters to track the numbers of skips and
     waits on the per-inode writeback serialisation lock to make it
     easier to check for this as a source of performance loss.

  Miscellaneous work:

   - Ensure that the sb_writers lock is taken around
     vfs_{set,remove}xattr() in the cachefiles code.

   - Reduce the number of conditional branches in netfs_perform_write().

   - Move the CIFS_INO_MODIFIED_ATTR flag to the netfs_inode struct and
     remove cifs_post_modify().

   - Move the max_len/max_nr_segs members from netfs_io_subrequest to
     netfs_io_stream as they're only needed for one subreq at a time.

   - Add an 'unknown' source value for tracing purposes.

   - Remove NETFS_COPY_TO_CACHE as it's no longer used.

   - Set the request work function up front at allocation time.

   - Use bh-disabling spinlocks for rreq->lock as cachefiles completion
     may be run from block-filesystem DIO completion in softirq context.

   - Remove fs/netfs/io.c"

* tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (25 commits)
  docs: filesystems: corrected grammar of netfs page
  cifs: Don't support ITER_XARRAY
  cifs: Switch crypto buffer to use a folio_queue rather than an xarray
  cifs: Use iterate_and_advance*() routines directly for hashing
  netfs: Cancel dirty folios that have no storage destination
  cachefiles, netfs: Fix write to partial block at EOF
  netfs: Remove fs/netfs/io.c
  netfs: Speed up buffered reading
  afs: Make read subreqs async
  netfs: Simplify the writeback code
  netfs: Provide an iterator-reset function
  netfs: Use new folio_queue data type and iterator instead of xarray iter
  cifs: Provide the capability to extract from ITER_FOLIOQ to RDMA SGEs
  iov_iter: Provide copy_folio_from_iter()
  mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios
  netfs: Use bh-disabling spinlocks for rreq->lock
  netfs: Set the request work function upon allocation
  netfs: Remove NETFS_COPY_TO_CACHE
  netfs: Reserve netfs_sreq_source 0 as unset/unknown
  netfs: Move max_len/max_nr_segs from netfs_io_subrequest to netfs_io_stream
  ...
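To make the new primitives concrete before the diff: the following is a minimal sketch (not taken from the patch; the folios[] array and nr_folios count are hypothetical, and error handling is trimmed) of how a caller might chain folio_queue segments and then wrap the chain in an ITER_FOLIOQ iterator, using only helpers that the test code below also exercises:

	struct folio_queue *fq, *p;
	struct iov_iter iter;
	size_t size = 0;

	/* Start a chain with one segment; each segment holds a small,
	 * fixed number of folio slots.
	 */
	fq = kzalloc(sizeof(*fq), GFP_KERNEL);
	folioq_init(fq);

	p = fq;
	for (int i = 0; i < nr_folios; i++) {
		if (folioq_full(p)) {
			/* Grow the chain on the fly when a segment fills up. */
			p->next = kzalloc(sizeof(*p), GFP_KERNEL);
			folioq_init(p->next);
			p->next->prev = p;
			p = p->next;
		}
		folioq_append(p, folios[i]);
		size += folio_size(folios[i]);
	}

	/* READ marks the buffer as a data destination, so copy_to_iter()
	 * can fill it; nothing here is shared with the VM, so no lock
	 * needs to be held across pages while a step function sleeps.
	 */
	iov_iter_folio_queue(&iter, READ, fq, 0, 0, size);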
Diffstat (limited to 'lib/kunit_iov_iter.c')
-rw-r--r--	lib/kunit_iov_iter.c	259
1 file changed, 259 insertions, 0 deletions
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 27e0c8ee71d8..13e15687675a 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/bvec.h>
+#include <linux/folio_queue.h>
 #include <kunit/test.h>
 
 MODULE_DESCRIPTION("iov_iter testing");
@@ -62,6 +63,9 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
 		KUNIT_ASSERT_EQ(test, got, npages);
 	}
 
+	for (int i = 0; i < npages; i++)
+		pages[i]->index = i;
+
 	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
@@ -362,6 +366,179 @@ stop:
 	KUNIT_SUCCEED(test);
 }
 
+static void iov_kunit_destroy_folioq(void *data)
+{
+	struct folio_queue *folioq, *next;
+
+	for (folioq = data; folioq; folioq = next) {
+		next = folioq->next;
+		for (int i = 0; i < folioq_nr_slots(folioq); i++)
+			if (folioq_folio(folioq, i))
+				folio_put(folioq_folio(folioq, i));
+		kfree(folioq);
+	}
+}
+
+static void __init iov_kunit_load_folioq(struct kunit *test,
+					 struct iov_iter *iter, int dir,
+					 struct folio_queue *folioq,
+					 struct page **pages, size_t npages)
+{
+	struct folio_queue *p = folioq;
+	size_t size = 0;
+	int i;
+
+	for (i = 0; i < npages; i++) {
+		if (folioq_full(p)) {
+			p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+			KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
+			folioq_init(p->next);
+			p->next->prev = p;
+			p = p->next;
+		}
+		folioq_append(p, page_folio(pages[i]));
+		size += PAGE_SIZE;
+	}
+	iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
+}
+
+static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
+{
+	struct folio_queue *folioq;
+
+	folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
+	kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
+	folioq_init(folioq);
+	return folioq;
+}
+
+/*
+ * Test copying to an ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_to_folioq(struct kunit *test)
+{
+	const struct kvec_test_range *pr;
+	struct iov_iter iter;
+	struct folio_queue *folioq;
+	struct page **spages, **bpages;
+	u8 *scratch, *buffer;
+	size_t bufsize, npages, size, copied;
+	int i, patt;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	folioq = iov_kunit_create_folioq(test);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	for (i = 0; i < bufsize; i++)
+		scratch[i] = pattern(i);
+
+	buffer = iov_kunit_create_buffer(test, &bpages, npages);
+	memset(buffer, 0, bufsize);
+
+	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+	i = 0;
+	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
+		iov_iter_advance(&iter, pr->from);
+		copied = copy_to_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+		i += size;
+		if (test->status == KUNIT_FAILURE)
+			goto stop;
+	}
+
+	/* Build the expected image in the scratch buffer. */
+	patt = 0;
+	memset(scratch, 0, bufsize);
+	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+		for (i = pr->from; i < pr->to; i++)
+			scratch[i] = pattern(patt++);
+
+	/* Compare the images */
+	for (i = 0; i < bufsize; i++) {
+		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+		if (buffer[i] != scratch[i])
+			return;
+	}
+
+stop:
+	KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from an ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_from_folioq(struct kunit *test)
+{
+	const struct kvec_test_range *pr;
+	struct iov_iter iter;
+	struct folio_queue *folioq;
+	struct page **spages, **bpages;
+	u8 *scratch, *buffer;
+	size_t bufsize, npages, size, copied;
+	int i, j;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	folioq = iov_kunit_create_folioq(test);
+
+	buffer = iov_kunit_create_buffer(test, &bpages, npages);
+	for (i = 0; i < bufsize; i++)
+		buffer[i] = pattern(i);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	memset(scratch, 0, bufsize);
+
+	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+	i = 0;
+	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+		iov_iter_advance(&iter, pr->from);
+		copied = copy_from_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+		i += size;
+	}
+
+	/* Build the expected image in the main buffer. */
+	i = 0;
+	memset(buffer, 0, bufsize);
+	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+		for (j = pr->from; j < pr->to; j++) {
+			buffer[i++] = pattern(j);
+			if (i >= bufsize)
+				goto stop;
+		}
+	}
+stop:
+
+	/* Compare the images */
+	for (i = 0; i < bufsize; i++) {
+		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+		if (scratch[i] != buffer[i])
+			return;
+	}
+
+	KUNIT_SUCCEED(test);
+}
+
 static void iov_kunit_destroy_xarray(void *data)
 {
 	struct xarray *xarray = data;
@@ -678,6 +855,85 @@ stop:
 }
 
 /*
+ * Test the extraction of ITER_FOLIOQ-type iterators.
+ */
+static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
+{
+	const struct kvec_test_range *pr;
+	struct folio_queue *folioq;
+	struct iov_iter iter;
+	struct page **bpages, *pagelist[8], **pages = pagelist;
+	ssize_t len;
+	size_t bufsize, size = 0, npages;
+	int i, from;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	folioq = iov_kunit_create_folioq(test);
+
+	iov_kunit_create_buffer(test, &bpages, npages);
+	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+		from = pr->from;
+		size = pr->to - from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+		iov_iter_advance(&iter, from);
+
+		do {
+			size_t offset0 = LONG_MAX;
+
+			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+						     ARRAY_SIZE(pagelist), 0, &offset0);
+			KUNIT_EXPECT_GE(test, len, 0);
+			if (len < 0)
+				break;
+			KUNIT_EXPECT_LE(test, len, size);
+			KUNIT_EXPECT_EQ(test, iter.count, size - len);
+			if (len == 0)
+				break;
+			size -= len;
+			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+				struct page *p;
+				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+				int ix;
+
+				KUNIT_ASSERT_GE(test, part, 0);
+				ix = from / PAGE_SIZE;
+				KUNIT_ASSERT_LT(test, ix, npages);
+				p = bpages[ix];
+				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+				from += part;
+				len -= part;
+				KUNIT_ASSERT_GE(test, len, 0);
+				if (len == 0)
+					break;
+				offset0 = 0;
+			}
+
+			if (test->status == KUNIT_FAILURE)
+				goto stop;
+		} while (iov_iter_count(&iter) > 0);
+
+		KUNIT_EXPECT_EQ(test, size, 0);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+	}
+
+stop:
+	KUNIT_SUCCEED(test);
+}
+
+/*
  * Test the extraction of ITER_XARRAY-type iterators.
  */
 static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
@@ -761,10 +1017,13 @@ static struct kunit_case __refdata iov_kunit_cases[] = {
 	KUNIT_CASE(iov_kunit_copy_from_kvec),
 	KUNIT_CASE(iov_kunit_copy_to_bvec),
 	KUNIT_CASE(iov_kunit_copy_from_bvec),
+	KUNIT_CASE(iov_kunit_copy_to_folioq),
+	KUNIT_CASE(iov_kunit_copy_from_folioq),
 	KUNIT_CASE(iov_kunit_copy_to_xarray),
 	KUNIT_CASE(iov_kunit_copy_from_xarray),
 	KUNIT_CASE(iov_kunit_extract_pages_kvec),
 	KUNIT_CASE(iov_kunit_extract_pages_bvec),
+	KUNIT_CASE(iov_kunit_extract_pages_folioq),
 	KUNIT_CASE(iov_kunit_extract_pages_xarray),
 	{}
 };
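For reference, the extraction pattern that iov_kunit_extract_pages_folioq() above exercises looks roughly like this in a consumer (a sketch under stated assumptions, not taken from the patch; use_page() is a hypothetical callback): iov_iter_extract_pages() fills a caller-supplied page array in batches and advances the iterator itself, with only the first page starting at a non-zero offset.

	struct page *pagelist[8], **pages = pagelist;
	size_t offset0;
	ssize_t len;

	while (iov_iter_count(&iter) > 0) {
		/* Pull out up to 8 pages (capped at 100KiB here); the
		 * iterator is advanced by the amount extracted.
		 */
		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		if (len <= 0)
			break;

		/* pagelist[0] begins at offset0; subsequent pages begin at 0. */
		for (int i = 0; len > 0 && i < ARRAY_SIZE(pagelist); i++) {
			size_t part = min_t(size_t, len, PAGE_SIZE - offset0);

			use_page(pagelist[i], offset0, part);	/* hypothetical */
			len -= part;
			offset0 = 0;
		}
	}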