-rw-r--r--   block/ll_rw_blk.c    1
-rw-r--r--   fs/splice.c         17
2 files changed, 9 insertions, 9 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5873861e1dbb..d99d402953a3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2558,6 +2558,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 		bio->bi_rw |= (1 << BIO_RW);
 
 	blk_rq_bio_prep(q, rq, bio);
+	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
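
Context on the block/ll_rw_blk.c hunk: blk_rq_map_kern() wraps a caller-supplied kernel buffer in a bio and attaches it to the request, but the bio was not passed through the bounce logic, so a buffer above the queue's bounce limit could reach a driver that cannot address it. The added blk_queue_bounce() call handles that inside the mapping helper instead of leaving it to every caller. The sketch below shows the kind of caller this affects; it is a hypothetical illustration, not code from this commit, and the request command setup a real caller would do is omitted.

#include <linux/blkdev.h>

/*
 * Hypothetical caller (illustration only): map a kernel buffer into a
 * request and execute it.  With the hunk above, blk_rq_map_kern() also
 * bounces rq->bio, so a buffer the device cannot address is copied into
 * pages below the queue's bounce limit before the driver sees it.
 */
static int issue_kern_buf(request_queue_t *q, void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return err;
}
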
diff --git a/fs/splice.c b/fs/splice.c
index 5428b0ff3b6f..12f28281d2b1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -289,12 +289,10 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 		nr_pages = PIPE_BUFFERS;
 
 	/*
-	 * Initiate read-ahead on this page range. however, don't call into
-	 * read-ahead if this is a non-zero offset (we are likely doing small
-	 * chunk splice and the page is already there) for a single page.
+	 * Don't try to 2nd guess the read-ahead logic, call into
+	 * page_cache_readahead() like the page cache reads would do.
 	 */
-	if (!loff || nr_pages > 1)
-		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
+	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
 
 	/*
 	 * Now fill in the holes:
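
Context on the hunk above: the removed heuristic skipped readahead whenever the splice started at a non-zero file offset and covered only a single page, which is exactly what happens when a file is spliced to a pipe in small sequential chunks, so readahead never kicked in for that workload. Calling page_cache_readahead() unconditionally lets the readahead state machine decide, the same way ordinary page cache reads do. The userspace sketch below shows that small-chunk pattern; it is a hypothetical illustration (the helper name and chunk size are made up), not code from this commit.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/*
 * Hypothetical userspace illustration: splice a file to a pipe in 4 KiB
 * pieces.  After the first chunk the offset is non-zero and each call
 * spans a single page, the case the removed heuristic excluded from
 * readahead; with the change above every chunk is reported to the
 * readahead logic.
 */
static int splice_in_chunks(int file_fd, int pipe_fd, loff_t size)
{
	loff_t off = 0;

	while (off < size) {
		ssize_t n = splice(file_fd, &off, pipe_fd, NULL,
				   4096, SPLICE_F_MOVE);
		if (n < 0)
			return -1;	/* errno set by splice() */
		if (n == 0)
			break;		/* nothing left to splice */
	}
	return 0;
}
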
@@ -378,10 +376,11 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 			 * If in nonblock mode then dont block on waiting
 			 * for an in-flight io page
 			 */
-			if (flags & SPLICE_F_NONBLOCK)
-				break;
-
-			lock_page(page);
+			if (flags & SPLICE_F_NONBLOCK) {
+				if (TestSetPageLocked(page))
+					break;
+			} else
+				lock_page(page);
 
 			/*
 			 * page was truncated, stop here. if this isn't the