Diffstat (limited to 'lib/scatterlist.c'):
 lib/scatterlist.c | 385 +++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 351 insertions(+), 34 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d7519a8f308..4af1c8b0775a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -9,29 +9,9 @@
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
-
-/**
- * sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * Usually the next entry will be @sg@ + 1, but if this sg element is part
- * of a chained scatterlist, it could jump to the start of a new
- * scatterlist array.
- *
- **/
-struct scatterlist *sg_next(struct scatterlist *sg)
-{
- if (sg_is_last(sg))
- return NULL;
-
- sg++;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
-
- return sg;
-}
-EXPORT_SYMBOL(sg_next);
+#include <linux/bvec.h>
+#include <linux/uio.h>
+#include <linux/folio_queue.h>
/**
* sg_nents - return total count of entries in scatterlist
@@ -93,9 +73,9 @@ EXPORT_SYMBOL(sg_nents_for_len);
* Should only be used casually, it (currently) scans the entire list
* to get the last entry.
*
- * Note that the @sgl@ pointer passed in need not be the first one,
- * the important bit is that @nents@ denotes the number of entries that
- * exist from @sgl@.
+ * Note that the @sgl pointer passed in need not be the first one,
+ * the important bit is that @nents denotes the number of entries that
+ * exist from @sgl.
*
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
@@ -263,7 +243,8 @@ EXPORT_SYMBOL(sg_free_table);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
- * @nents_first_chunk: Number of entries int the (preallocated) first
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
* scatterlist chunk, 0 means no such preallocated chunk provided by user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
@@ -364,7 +345,7 @@ EXPORT_SYMBOL(__sg_alloc_table);
* @gfp_mask: GFP allocation mask
*
* Description:
- * Allocate and initialize an sg table. If @nents@ is larger than
+ * Allocate and initialize an sg table. If @nents is larger than
* SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
*
**/
@@ -470,14 +451,14 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
return -EOPNOTSUPP;
if (sgt_append->prv) {
- unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
- sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+ unsigned long next_pfn;
if (WARN_ON(offset))
return -EINVAL;
/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
+ next_pfn = (sg_phys(sgt_append->prv) + prv_len) / PAGE_SIZE;
if (page_to_pfn(pages[0]) == next_pfn) {
last_pg = pfn_to_page(next_pfn - 1);
while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
@@ -786,6 +767,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next);
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
+ * @flags: sg iterator flags
*
* Description:
* Starts mapping iterator @miter.
@@ -874,7 +856,7 @@ EXPORT_SYMBOL(sg_miter_skip);
* @miter->addr and @miter->length point to the current mapping.
*
* Context:
- * May sleep if !SG_MITER_ATOMIC.
+ * May sleep if !SG_MITER_ATOMIC && !SG_MITER_LOCAL.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
@@ -896,6 +878,8 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
if (miter->__flags & SG_MITER_ATOMIC)
miter->addr = kmap_atomic(miter->page) + miter->__offset;
+ else if (miter->__flags & SG_MITER_LOCAL)
+ miter->addr = kmap_local_page(miter->page) + miter->__offset;
else
miter->addr = kmap(miter->page) + miter->__offset;
@@ -931,7 +915,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON_ONCE(!pagefault_disabled());
kunmap_atomic(miter->addr);
- } else
+ } else if (miter->__flags & SG_MITER_LOCAL)
+ kunmap_local(miter->addr);
+ else
kunmap(miter->page);
miter->page = NULL;
@@ -960,7 +946,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
- unsigned int sg_flags = SG_MITER_ATOMIC;
+ unsigned int sg_flags = SG_MITER_LOCAL;
if (to_buffer)
sg_flags |= SG_MITER_FROM_SG;
@@ -1075,7 +1061,7 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
- unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+ unsigned int sg_flags = SG_MITER_LOCAL | SG_MITER_TO_SG;
sg_miter_start(&miter, sgl, nents, sg_flags);
@@ -1095,3 +1081,334 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
+
+/*
+ * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
+ * iterators, and add them to the scatterlist.
+ */
+static ssize_t extract_user_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct page **pages;
+ unsigned int npages;
+ ssize_t ret = 0, res;
+ size_t len, off;
+
+ /* We decant the page list into the tail of the scatterlist */
+ pages = (void *)sgtable->sgl +
+ array_size(sg_max, sizeof(struct scatterlist));
+ pages -= sg_max;
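+ /*
+ * The pointer array sits at the tail of the space reserved for the
+ * sg entries; pointers are consumed from the front of the array
+ * before the sg entries being filled in from the front of the
+ * table catch up with them.
+ */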
+
+ do {
+ res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+ extraction_flags, &off);
+ if (res <= 0)
+ goto failed;
+
+ len = res;
+ maxsize -= len;
+ ret += len;
+ npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
+ sg_max -= npages;
+
+ for (; npages > 0; npages--) {
+ struct page *page = *pages;
+ size_t seg = min_t(size_t, PAGE_SIZE - off, len);
+
+ *pages++ = NULL;
+ sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ len -= seg;
+ off = 0;
+ }
+ } while (maxsize > 0 && sg_max > 0);
+
+ return ret;
+
+failed:
+ while (sgtable->nents > sgtable->orig_nents)
+ unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+ return res;
+}
+
+/*
+ * Extract up to sg_max pages from a BVEC-type iterator and add them to the
+ * scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct bio_vec *bv = iter->bvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ size_t off, len;
+
+ len = bv[i].bv_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ len = min_t(size_t, maxsize, len - start);
+ off = bv[i].bv_offset + start;
+
+ sg_set_page(sg, bv[i].bv_page, len, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ ret += len;
+ maxsize -= len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max pages from a KVEC-type iterator and add them to the
+ * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or
+ * static buffers. The pages are not pinned.
+ */
+static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct kvec *kv = iter->kvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ struct page *page;
+ unsigned long kaddr;
+ size_t off, len, seg;
+
+ len = kv[i].iov_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ kaddr = (unsigned long)kv[i].iov_base + start;
+ off = kaddr & ~PAGE_MASK;
+ len = min_t(size_t, maxsize, len - start);
+ kaddr &= PAGE_MASK;
+
+ maxsize -= len;
+ ret += len;
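+ /*
+ * Walk the segment one page at a time: a vmalloc'd buffer is
+ * virtually contiguous, but its backing pages need not be
+ * physically contiguous.
+ */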
+ do {
+ seg = min_t(size_t, len, PAGE_SIZE - off);
+ if (is_vmalloc_or_module_addr((void *)kaddr))
+ page = vmalloc_to_page((void *)kaddr);
+ else
+ page = virt_to_page((void *)kaddr);
+
+ sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ len -= seg;
+ kaddr += PAGE_SIZE;
+ off = 0;
+ } while (len > 0 && sg_max > 0);
+
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max folios from a FOLIOQ-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_folioq_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned int slot = iter->folioq_slot;
+ ssize_t ret = 0;
+ size_t offset = iter->iov_offset;
+
+ BUG_ON(!folioq);
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ if (WARN_ON_ONCE(!folioq))
+ return 0;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t fsize = folioq_folio_size(folioq, slot);
+
+ if (offset < fsize) {
+ size_t part = umin(maxsize - ret, fsize - offset);
+
+ sg_set_page(sg, folio_page(folio, 0), part, offset);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+ offset += part;
+ ret += part;
+ }
+
+ if (offset >= fsize) {
+ offset = 0;
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (!folioq->next) {
+ WARN_ON_ONCE(ret < iter->count);
+ break;
+ }
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ } while (sg_max > 0 && ret < maxsize);
+
+ iter->folioq = folioq;
+ iter->folioq_slot = slot;
+ iter->iov_offset = offset;
+ iter->count -= ret;
+ return ret;
+}
+
+/*
+ * Extract up to sg_max folios from an XARRAY-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct xarray *xa = iter->xarray;
+ struct folio *folio;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ ssize_t ret = 0;
+ size_t offset, len;
+ XA_STATE(xas, xa, index);
+
+ rcu_read_lock();
+
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start);
+ len = min_t(size_t, maxsize, folio_size(folio) - offset);
+
+ sg_set_page(sg, folio_page(folio, 0), len, offset);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ maxsize -= len;
+ ret += len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ }
+
+ rcu_read_unlock();
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/**
+ * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
+ * @iter: The iterator to extract from
+ * @maxsize: The amount of iterator to copy
+ * @sgtable: The scatterlist table to fill in
+ * @sg_max: Maximum number of elements in @sgtable that may be filled
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * add them to a scatterlist that refers to all of those bits, to a maximum
+ * addition of @sg_max elements.
+ *
+ * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
+ * pinned; BVEC-, KVEC-, FOLIOQ- and XARRAY-type are extracted but aren't
+ * pinned; DISCARD-type is not supported.
+ *
+ * No end mark is placed on the scatterlist; that's left to the caller.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * If successful, @sgtable->nents is updated to include the number of elements
+ * added and the number of bytes added is returned. @sgtable->orig_nents is
+ * left unaltered.
+ *
+ * The iov_iter_extract_will_pin() function can be used to query how cleanup
+ * should be performed.
+ */
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ if (maxsize == 0)
+ return 0;
+
+ switch (iov_iter_type(iter)) {
+ case ITER_UBUF:
+ case ITER_IOVEC:
+ return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_BVEC:
+ return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_KVEC:
+ return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_FOLIOQ:
+ return extract_folioq_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_XARRAY:
+ return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ default:
+ pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(extract_iter_to_sg);
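
For reference, a minimal usage sketch of the interface added above. This is
not part of the patch: demo_kvec_to_sg() and its buffer are hypothetical, the
rest is existing kernel API, and the buffer is assumed small enough that the
sg table is a single unchained chunk.

#include <linux/scatterlist.h>
#include <linux/uio.h>

static int demo_kvec_to_sg(void *buf, size_t len)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	/* A misaligned buffer of len bytes can span one extra page. */
	unsigned int max_ents = DIV_ROUND_UP(len, PAGE_SIZE) + 1;
	struct sg_table sgt;
	struct iov_iter iter;
	ssize_t extracted;
	int ret;

	ret = sg_alloc_table(&sgt, max_ents, GFP_KERNEL);
	if (ret)
		return ret;
	sgt.nents = 0;	/* extract_iter_to_sg() appends at sgl + nents */

	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);
	extracted = extract_iter_to_sg(&iter, len, &sgt, max_ents, 0);
	if (extracted < 0) {
		sg_free_table(&sgt);
		return extracted;
	}

	/* No end mark is placed by extract_iter_to_sg(); do it here. */
	sg_mark_end(&sgt.sgl[sgt.nents - 1]);

	/* ... dma_map_sgtable() and hand off to hardware ... */

	/*
	 * KVEC pages are not pinned; for UBUF/IOVEC iterators, check
	 * iov_iter_extract_will_pin() and unpin_user_page() each page
	 * once the transfer completes.
	 */
	sg_free_table(&sgt);
	return 0;
}

Similarly, the SG_MITER_LOCAL conversion above makes the mapping iterator use
kmap_local_page()/kunmap_local() instead of kmap_atomic(). A sketch of an
iterator user (demo_sum_sg() and the byte-sum are purely illustrative):

static u8 demo_sum_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	u8 sum = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_LOCAL | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr is a kmap_local_page() mapping */
		const u8 *p = miter.addr;
		size_t i;

		for (i = 0; i < miter.length; i++)
			sum += p[i];
	}
	sg_miter_stop(&miter);	/* drops the mapping via kunmap_local() */
	return sum;
}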