author     Bart Van Assche <bart.vanassche@wdc.com>   2018-01-19 11:00:54 -0800
committer  Jens Axboe <axboe@kernel.dk>               2018-01-19 12:31:03 -0700
commit     8c7a8d1c4b9c30a2be3b31a2e6af1cefd45574eb (patch)
tree       0a539f04d39495479b9a4965ace69aad45c01cf9 /lib/scatterlist.c
parent     9b9c63f71bddf3dc897aaaa46c0593abc3e09932 (diff)
lib/scatterlist: Fix chaining support in sgl_alloc_order()
This patch prevents workloads with large block sizes (megabytes) from
triggering the following call stack with the ib_srpt driver (the only
driver that chains scatterlists allocated by sgl_alloc_order()):

BUG: Bad page state in process kworker/0:1H  pfn:2423a78
page:fffffb03d08e9e00 count:-3 mapcount:0 mapping:          (null) index:0x0
flags: 0x57ffffc0000000()
raw: 0057ffffc0000000 0000000000000000 0000000000000000 fffffffdffffffff
raw: dead000000000100 dead000000000200 0000000000000000 0000000000000000
page dumped because: nonzero _count
CPU: 0 PID: 733 Comm: kworker/0:1H Tainted: G          I      4.15.0-rc7.bart+ #1
Hardware name: HP ProLiant DL380 G7, BIOS P67 08/16/2015
Workqueue: ib-comp-wq ib_cq_poll_work [ib_core]
Call Trace:
 dump_stack+0x5c/0x83
 bad_page+0xf5/0x10f
 get_page_from_freelist+0xa46/0x11b0
 __alloc_pages_nodemask+0x103/0x290
 sgl_alloc_order+0x101/0x180
 target_alloc_sgl+0x2c/0x40 [target_core_mod]
 srpt_alloc_rw_ctxs+0x173/0x2d0 [ib_srpt]
 srpt_handle_new_iu+0x61e/0x7f0 [ib_srpt]
 __ib_process_cq+0x55/0xa0 [ib_core]
 ib_cq_poll_work+0x1b/0x60 [ib_core]
 process_one_work+0x141/0x340
 worker_thread+0x47/0x3e0
 kthread+0xf5/0x130
 ret_from_fork+0x1f/0x30

Fixes: e80a0af4759a ("lib/scatterlist: Introduce sgl_alloc() and sgl_free()")
Reported-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Cc: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
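The failure mode is a double free: ib_srpt frees each segment of a chained
scatterlist separately, and the old sgl_free_order() walked through the chain
link into the next segment and freed its pages a second time, which the page
allocator later reports as the bad page state above. The sketch below is
illustrative only (not the actual ib_srpt code) and assumes a kernel context;
sgl_alloc_order(), sg_chain() and sgl_free_n_order() are the real APIs, the
function and variable names are made up for the example:

/*
 * Illustrative sketch: allocate two chainable lists, link them with
 * sg_chain(), then free each segment with its own element count so the
 * free of the first segment stops at the chain boundary instead of also
 * freeing the second segment's pages.
 */
#include <linux/scatterlist.h>
#include <linux/gfp.h>

static void example_chain_and_free(void)
{
	unsigned int nents_a, nents_b;
	struct scatterlist *a, *b;

	/*
	 * chainable=true makes sgl_alloc_order() reserve one extra entry
	 * for the chain link; that extra entry is what sg_init_table()
	 * must also cover (the other half of this fix).
	 */
	a = sgl_alloc_order(4 * PAGE_SIZE, 0, true, GFP_KERNEL, &nents_a);
	b = sgl_alloc_order(4 * PAGE_SIZE, 0, true, GFP_KERNEL, &nents_b);
	if (!a || !b)
		goto out;

	/* Link segment b after segment a's nents_a data entries. */
	sg_chain(a, nents_a + 1, b);

	/* ... submit the chained list for I/O ... */

out:
	/*
	 * Free each segment with its own nents; sgl_free_order() would
	 * follow the chain and free b's pages twice.
	 */
	if (a)
		sgl_free_n_order(a, nents_a, 0);
	if (b)
		sgl_free_n_order(b, nents_b, 0);
}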
Diffstat (limited to 'lib/scatterlist.c')
-rw-r--r--   lib/scatterlist.c   32
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9afc9b432083..53728d391d3a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -512,7 +512,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
 	if (!sgl)
 		return NULL;
 
-	sg_init_table(sgl, nent);
+	sg_init_table(sgl, nalloc);
 	sg = sgl;
 	while (length) {
 		elem_len = min_t(u64, length, PAGE_SIZE << order);
@@ -526,7 +526,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
 		length -= elem_len;
 		sg = sg_next(sg);
 	}
-	WARN_ON_ONCE(sg);
+	WARN_ONCE(length, "length = %lld\n", length);
 	if (nent_p)
 		*nent_p = nent;
 	return sgl;
@@ -549,22 +549,44 @@ struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
 EXPORT_SYMBOL(sgl_alloc);
 
 /**
- * sgl_free_order - free a scatterlist and its pages
+ * sgl_free_n_order - free a scatterlist and its pages
  * @sgl: Scatterlist with one or more elements
+ * @nents: Maximum number of elements to free
  * @order: Second argument for __free_pages()
+ *
+ * Notes:
+ * - If several scatterlists have been chained and each chain element is
+ *   freed separately then it's essential to set nents correctly to avoid that a
+ *   page would get freed twice.
+ * - All pages in a chained scatterlist can be freed at once by setting @nents
+ *   to a high number.
  */
-void sgl_free_order(struct scatterlist *sgl, int order)
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
 {
 	struct scatterlist *sg;
 	struct page *page;
+	int i;
 
-	for (sg = sgl; sg; sg = sg_next(sg)) {
+	for_each_sg(sgl, sg, nents, i) {
+		if (!sg)
+			break;
 		page = sg_page(sg);
 		if (page)
 			__free_pages(page, order);
 	}
 	kfree(sgl);
 }
+EXPORT_SYMBOL(sgl_free_n_order);
+
+/**
+ * sgl_free_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @order: Second argument for __free_pages()
+ */
+void sgl_free_order(struct scatterlist *sgl, int order)
+{
+	sgl_free_n_order(sgl, INT_MAX, order);
+}
 EXPORT_SYMBOL(sgl_free_order);
 
 /**
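Per the new kernel-doc notes, callers tearing down a whole list, rather than
individual chain segments, can keep using sgl_free_order(), which now simply
forwards to sgl_free_n_order(sgl, INT_MAX, order). A minimal sketch under the
same assumptions as the example above (illustrative only, kernel context
assumed, names other than the sgl_* helpers are made up):

#include <linux/scatterlist.h>
#include <linux/gfp.h>

static void example_simple_free(void)
{
	unsigned int nents;
	struct scatterlist *sgl;

	/* Unchained allocation: no extra chain entry is reserved. */
	sgl = sgl_alloc_order(8 * PAGE_SIZE, 1, false, GFP_KERNEL, &nents);
	if (!sgl)
		return;

	/* ... use the list ... */

	/*
	 * Equivalent to sgl_free_n_order(sgl, INT_MAX, 1): every element
	 * reachable from sgl is visited, its pages are freed with the given
	 * order, and the scatterlist array itself is kfree()d.
	 */
	sgl_free_order(sgl, 1);
}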