author     Tejun Heo <tj@kernel.org>           2010-10-15 12:56:21 +0200
committer  Jens Axboe <jaxboe@fusionio.com>    2010-10-15 12:56:21 +0200
commit     47526903feb52f4c26a6350370bdf74e337fcdb1 (patch)
tree       319aaf924a030ccbb680817128606defd33ac476 /arch/um
parent     93055c31045a2d5599ec613a0c6cdcefc481a460 (diff)
ubd: fix incorrect sector handling during request restart
Commit f81f2f7c (ubd: drop unnecessary rq->sector manipulation) dropped request->sector manipulation in preparation for global request handling cleanup; unfortunately, it incorrectly assumed that the updated sector wasn't being used.

ubd tries to issue as many requests as possible to io_thread. When issuing fails due to memory pressure or other reasons, the device is put on the restart list and issuing stops. On IO completion, devices on the restart list are scanned and IO issuing is restarted.

ubd issues IOs sg-by-sg and issuing can be stopped in the middle of a request, so each device on the restart queue needs to remember where to restart in its current request. ubd needs to keep track of the issue position itself because:

* blk_rq_pos(req) is now updated by the block layer to keep track of _completion_ position.

* Multiple io_req's for the current request may be in flight, so it's difficult to tell where blk_rq_pos(req) currently is.

Add ubd->rq_pos to keep track of the issue position and use it to correctly restart io_req issue.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Richard Weinberger <richard@nod.at>
Tested-by: Richard Weinberger <richard@nod.at>
Tested-by: Chris Frey <cdfrey@foursquare.net>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
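The bookkeeping the patch describes can be illustrated outside the kernel. Below is a minimal, self-contained userspace C sketch; struct dev_state, struct segment, issue_segment() and the sector values are hypothetical stand-ins for the ubd and block-layer structures, and only the rq_pos idea mirrors the patch: the issue position starts at the request's starting sector and advances only after a segment has been handed off successfully, so a stalled device restarts exactly where it stopped.

#include <stdbool.h>
#include <stdio.h>

struct segment {
	unsigned int length;		/* segment length in bytes */
};

struct dev_state {
	unsigned long long rq_pos;	/* next sector to *issue* (512-byte units) */
	int start_sg, end_sg;		/* current position in the sg list */
	struct segment sg[4];
};

static int budget;			/* successful hand-offs before simulated pressure */

/* Pretend to hand one segment to the I/O thread; may fail under pressure. */
static bool issue_segment(unsigned long long sector, struct segment *sg)
{
	if (budget <= 0)
		return false;
	budget--;
	printf("issue: sector %llu, %u bytes\n", sector, sg->length);
	return true;
}

/* Issue as many segments as possible; resumable because rq_pos is per device. */
static void issue_request(struct dev_state *dev)
{
	while (dev->start_sg < dev->end_sg) {
		struct segment *sg = &dev->sg[dev->start_sg];

		if (!issue_segment(dev->rq_pos, sg)) {
			/* Stop here; rq_pos still names the unissued segment,
			   so a later restart resumes at the right sector. */
			printf("stalled at sector %llu\n", dev->rq_pos);
			return;
		}
		/* Advance the issue position only after a successful hand-off. */
		dev->rq_pos += sg->length >> 9;
		dev->start_sg++;
	}
	printf("request fully issued\n");
}

int main(void)
{
	struct dev_state dev = {
		.rq_pos   = 100,	/* would come from blk_rq_pos(req) */
		.start_sg = 0,
		.end_sg   = 4,
		.sg       = { { 4096 }, { 4096 }, { 8192 }, { 4096 } },
	};

	budget = 2;
	issue_request(&dev);		/* stalls after two segments */

	budget = 8;
	issue_request(&dev);		/* "restart": resumes at sector 116 */
	return 0;
}

Tracking the completion position instead (what blk_rq_pos() now reports) would restart at the wrong sector whenever earlier io_req's for the same request are still in flight, which is exactly the situation the patch fixes.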
Diffstat (limited to 'arch/um')
-rw-r--r--  arch/um/drivers/ubd_kern.c | 9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 1bcd208c459f..9734994cba1e 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -163,6 +163,7 @@ struct ubd {
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
+ sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -187,6 +188,7 @@ struct ubd {
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
+ .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
- sector_t sector;
int n;
while(1){
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
return;
dev->request = req;
+ dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
- sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
return;
}
prepare_request(req, io_req,
- (unsigned long long)sector << 9,
+ (unsigned long long)dev->rq_pos << 9,
sg->offset, sg->length, sg_page(sg));
- sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
return;
}
+ dev->rq_pos += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;