Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
 drivers/block/xen-blkback/blkback.c | 181
 1 file changed, 136 insertions(+), 45 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 14e452896d04..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -239,9 +239,9 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
 	atomic_dec(&ring->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
-				 unsigned int num)
+static void free_persistent_gnts(struct xen_blkif_ring *ring)
 {
+	struct rb_root *root = &ring->persistent_gnts;
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct persistent_gnt *persistent_gnt;
@@ -249,6 +249,9 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
 	int segs_to_unmap = 0;
 	struct gntab_unmap_queue_data unmap_data;
 
+	if (RB_EMPTY_ROOT(root))
+		return;
+
 	unmap_data.pages = pages;
 	unmap_data.unmap_ops = unmap;
 	unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
 		rb_erase(&persistent_gnt->node, root);
 		kfree(persistent_gnt);
-		num--;
+		ring->persistent_gnt_c--;
 	}
-	BUG_ON(num != 0);
+
+	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+	BUG_ON(ring->persistent_gnt_c != 0);
 }
 
 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
@@ -442,7 +447,7 @@ static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
  * Routines for managing virtual block devices (vbds).
  */
 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
-			     int operation)
+			     enum req_op operation)
 {
 	struct xen_vbd *vbd = &blkif->vbd;
 	int rc = -EACCES;
@@ -460,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
 	}
 
 	req->dev  = vbd->pdevice;
-	req->bdev = vbd->bdev;
+	req->bdev = file_bdev(vbd->bdev_file);
 	rc = 0;
 
  out:
@@ -539,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
 		 ring->st_rd_req, ring->st_wr_req, ring->st_f_req,
 		 ring->st_ds_req, ring->persistent_gnt_c, max_pgrants);
-	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+	ring->st_print = jiffies + secs_to_jiffies(10);
 	ring->st_rd_req = 0;
 	ring->st_wr_req = 0;
 	ring->st_oo_req = 0;
@@ -631,12 +636,7 @@ purge_gnt_list:
 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
 {
 	/* Free all persistent grant pages */
-	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
-		free_persistent_gnts(ring, &ring->persistent_gnts,
-			ring->persistent_gnt_c);
-
-	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
-	ring->persistent_gnt_c = 0;
+	free_persistent_gnts(ring);
 
 	/* Since we are shutting down remove all pages from the buffer */
 	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ next:
 out:
 	for (i = last_map; i < num; i++) {
 		/* Don't zap current batch's valid persistent grants. */
-		if(i >= map_until)
+		if (i >= map_until)
 			pages[i]->persistent_gnt = NULL;
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	}
@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	if (rc)
 		goto unmap;
 
-	for (n = 0, i = 0; n < nseg; n++) {
+	for (n = 0; n < nseg; n++) {
 		uint8_t first_sect, last_sect;
 
 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
@@ -969,8 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
 	int err = 0;
 	int status = BLKIF_RSP_OKAY;
 	struct xen_blkif *blkif = ring->blkif;
-	struct block_device *bdev = blkif->vbd.bdev;
-	unsigned long secure;
+	struct block_device *bdev = file_bdev(blkif->vbd.bdev_file);
 	struct phys_req preq;
 
 	xen_blkif_get(blkif);
@@ -987,13 +986,15 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
 	}
 	ring->st_ds_req++;
 
-	secure = (blkif->vbd.discard_secure &&
-		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
-		  BLKDEV_DISCARD_SECURE : 0;
+	if (blkif->vbd.discard_secure &&
+	    (req->u.discard.flag & BLKIF_DISCARD_SECURE))
+		err = blkdev_issue_secure_erase(bdev,
+				req->u.discard.sector_number,
+				req->u.discard.nr_sectors, GFP_KERNEL);
+	else
+		err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+					   req->u.discard.nr_sectors, GFP_KERNEL);
 
-	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
-				   req->u.discard.nr_sectors,
-				   GFP_KERNEL, secure);
 fail_response:
 	if (err == -EOPNOTSUPP) {
 		pr_debug("discard op failed, not supported\n");
@@ -1071,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
 	bio_put(bio);
 }
 
+static void blkif_get_x86_32_req(struct blkif_request *dst,
+				 const struct blkif_x86_32_request *src)
+{
+	unsigned int i, n;
+
+	dst->operation = READ_ONCE(src->operation);
+
+	switch (dst->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+		dst->u.rw.handle = src->u.rw.handle;
+		dst->u.rw.id = src->u.rw.id;
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+			  dst->u.rw.nr_segments);
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+
+	case BLKIF_OP_INDIRECT:
+		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+		dst->u.indirect.nr_segments =
+			READ_ONCE(src->u.indirect.nr_segments);
+		dst->u.indirect.handle = src->u.indirect.handle;
+		dst->u.indirect.id = src->u.indirect.id;
+		dst->u.indirect.sector_number = src->u.indirect.sector_number;
+		n = min(MAX_INDIRECT_PAGES,
+			INDIRECT_PAGES(dst->u.indirect.nr_segments));
+		for (i = 0; i < n; i++)
+			dst->u.indirect.indirect_grefs[i] =
+				src->u.indirect.indirect_grefs[i];
+		break;
+
+	default:
+		/*
+		 * Don't know how to translate this op. Only get the
+		 * ID so failure can be reported to the frontend.
+		 */
+		dst->u.other.id = src->u.other.id;
+		break;
+	}
+}
+
+static void blkif_get_x86_64_req(struct blkif_request *dst,
+				 const struct blkif_x86_64_request *src)
+{
+	unsigned int i, n;
+
+	dst->operation = READ_ONCE(src->operation);
+
+	switch (dst->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+		dst->u.rw.handle = src->u.rw.handle;
+		dst->u.rw.id = src->u.rw.id;
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+			  dst->u.rw.nr_segments);
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+
+	case BLKIF_OP_INDIRECT:
+		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+		dst->u.indirect.nr_segments =
+			READ_ONCE(src->u.indirect.nr_segments);
+		dst->u.indirect.handle = src->u.indirect.handle;
+		dst->u.indirect.id = src->u.indirect.id;
+		dst->u.indirect.sector_number = src->u.indirect.sector_number;
+		n = min(MAX_INDIRECT_PAGES,
+			INDIRECT_PAGES(dst->u.indirect.nr_segments));
+		for (i = 0; i < n; i++)
+			dst->u.indirect.indirect_grefs[i] =
+				src->u.indirect.indirect_grefs[i];
+		break;
+
+	default:
+		/*
+		 * Don't know how to translate this op. Only get the
+		 * ID so failure can be reported to the frontend.
+		 */
+		dst->u.other.id = src->u.other.id;
+		break;
+	}
+}
 
 /*
  * Function to copy the from the ring buffer the 'struct blkif_request'
@@ -1192,8 +1297,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	struct bio *bio = NULL;
 	struct bio **biolist = pending_req->biolist;
 	int i, nbio = 0;
-	int operation;
-	int operation_flags = 0;
+	enum req_op operation;
+	blk_opf_t operation_flags = 0;
 	struct blk_plug plug;
 	bool drain = false;
 	struct grant_page **pages = pending_req->segments;
@@ -1326,16 +1431,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 				     pages[i]->page,
 				     seg[i].nsec << 9,
 				     seg[i].offset) == 0)) {
-			bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
-			if (unlikely(bio == NULL))
-				goto fail_put_bio;
-
+			bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
+					operation | operation_flags,
+					GFP_KERNEL);
 			biolist[nbio++] = bio;
-			bio_set_dev(bio, preq.bdev);
 			bio->bi_private = pending_req;
 			bio->bi_end_io  = end_block_io_op;
 			bio->bi_iter.bi_sector  = preq.sector_number;
-			bio_set_op_attrs(bio, operation, operation_flags);
 		}
 
 		preq.sector_number += seg[i].nsec;
@@ -1345,15 +1447,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	if (!bio) {
 		BUG_ON(operation_flags != REQ_PREFLUSH);
 
-		bio = bio_alloc(GFP_KERNEL, 0);
-		if (unlikely(bio == NULL))
-			goto fail_put_bio;
-
+		bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
+				GFP_KERNEL);
 		biolist[nbio++] = bio;
-		bio_set_dev(bio, preq.bdev);
 		bio->bi_private = pending_req;
 		bio->bi_end_io  = end_block_io_op;
-		bio_set_op_attrs(bio, operation, operation_flags);
 	}
 
 	atomic_set(&pending_req->pendcnt, nbio);
@@ -1381,14 +1479,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	free_req(ring, pending_req);
 	msleep(1); /* back off a bit */
 	return -EIO;
-
- fail_put_bio:
-	for (i = 0; i < nbio; i++)
-		bio_put(biolist[i]);
-	atomic_set(&pending_req->pendcnt, 1);
-	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
-	msleep(1); /* back off a bit */
-	return -EIO;
 }
 
@@ -1473,5 +1563,6 @@ static void __exit xen_blkif_fini(void)
 
 module_exit(xen_blkif_fini);
 
+MODULE_DESCRIPTION("Virtual block device back-end driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("xen-backend:vbd");
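
Note on the discard hunk: newer kernels dropped the flags argument from blkdev_issue_discard(), and secure erase became a separate helper, blkdev_issue_secure_erase(), so the ternary computing BLKDEV_DISCARD_SECURE is replaced by an explicit branch. A minimal sketch of the resulting dispatch pattern; issue_discard_sketch() and want_secure are hypothetical stand-ins for the vbd/request state checked in the hunk:

#include <linux/blkdev.h>

/*
 * Sketch only: pick secure erase when both the backend supports it and
 * the frontend requested it; otherwise fall back to a plain discard.
 * Both helpers return 0 on success or a negative errno (e.g. -EOPNOTSUPP).
 */
static int issue_discard_sketch(struct block_device *bdev, sector_t start,
				sector_t nr_sects, bool want_secure)
{
	if (want_secure)
		return blkdev_issue_secure_erase(bdev, start, nr_sects,
						 GFP_KERNEL);

	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}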
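The added blkif_get_x86_32_req()/blkif_get_x86_64_req() helpers copy requests out of a ring shared with the guest, so the operation code and the segment counts are loaded exactly once with READ_ONCE() and the counts are clamped before being used as loop bounds; otherwise a concurrently-writing frontend could change a field between validation and use. A self-contained sketch of that pattern, assuming made-up names (sketch_req, SKETCH_MAX_SEGS):

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/minmax.h>	/* min_t() */

#define SKETCH_MAX_SEGS 11	/* stands in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct sketch_req {		/* lives in memory the other side can write */
	unsigned int nr_segments;
	unsigned long seg[SKETCH_MAX_SEGS];
};

/*
 * Copy from shared memory into a private structure, loading the
 * guest-controllable count exactly once and clamping it before it
 * is used as a loop bound over the fixed-size array.
 */
static void copy_req_sketch(struct sketch_req *dst,
			    const struct sketch_req *src)
{
	unsigned int i, n;

	dst->nr_segments = READ_ONCE(src->nr_segments);
	n = min_t(unsigned int, SKETCH_MAX_SEGS, dst->nr_segments);
	for (i = 0; i < n; i++)
		dst->seg[i] = src->seg[i];
}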
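The bio hunks follow the current bio_alloc() signature, which takes the target block device and the request opf at allocation time, making the separate bio_set_dev() and bio_set_op_attrs() calls unnecessary. With GFP_KERNEL and nr_vecs <= BIO_MAX_VECS the allocation is mempool-backed and does not fail, which is why the NULL check and the whole fail_put_bio unwind path are deleted. A small sketch of the new-style allocation; alloc_read_bio_sketch() is a hypothetical helper, not part of the driver:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch: allocate and populate a read bio the modern way. No NULL
 * check is needed: a fs_bio_set-backed bio_alloc() with GFP_KERNEL
 * and nr_vecs <= BIO_MAX_VECS sleeps rather than failing.
 */
static struct bio *alloc_read_bio_sketch(struct block_device *bdev,
					 unsigned short nr_vecs,
					 sector_t sector,
					 bio_end_io_t *done, void *private)
{
	struct bio *bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = done;
	bio->bi_private = private;
	return bio;
}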
