Diffstat (limited to 'drivers/block/xen-blkback/common.h')
-rw-r--r--   drivers/block/xen-blkback/common.h   124
1 file changed, 15 insertions, 109 deletions
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 1d3002d773f7..b427d54bc120 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -36,7 +36,6 @@
 #include <linux/io.h>
 #include <linux/rbtree.h>
 #include <asm/setup.h>
-#include <asm/pgalloc.h>
 #include <asm/hypervisor.h>
 #include <xen/grant_table.h>
 #include <xen/page.h>
@@ -133,7 +132,7 @@ struct blkif_x86_32_request {
 struct blkif_x86_64_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
 	blkif_vdev_t   handle;       /* only for read/write requests */
-	uint32_t       _pad1;        /* offsetof(blkif_reqest..,u.rw.id)==8 */
+	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
 	uint64_t       id;
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -222,11 +221,14 @@ struct xen_vbd {
 	unsigned char		type;
 	/* phys device that this vbd maps to. */
 	u32			pdevice;
-	struct block_device	*bdev;
+	struct file		*bdev_file;
 	/* Cached size parameter. */
 	sector_t		size;
 	unsigned int		flush_support:1;
 	unsigned int		discard_secure:1;
+	/* Connect-time cached feature_persistent parameter value */
+	unsigned int		feature_gnt_persistent_parm:1;
+	/* Persistent grants feature negotiation result */
 	unsigned int		feature_gnt_persistent:1;
 	unsigned int		overflow_max_grants:1;
 };
@@ -289,14 +291,12 @@ struct xen_blkif_ring {
 	struct work_struct	persistent_purge_work;

 	/* Buffer of free pages to map grant refs. */
-	spinlock_t		free_pages_lock;
-	int			free_pages_num;
-	struct list_head	free_pages;
+	struct gnttab_page_cache free_pages;

 	struct work_struct	free_work;
 	/* Thread shutdown wait queue. */
 	wait_queue_head_t	shutdown_wq;
-	struct xen_blkif 	*blkif;
+	struct xen_blkif	*blkif;
 };

 struct xen_blkif {
@@ -315,10 +315,12 @@ struct xen_blkif {
 	atomic_t		drain;

 	struct work_struct	free_work;
-	unsigned int nr_ring_pages;
+	unsigned int		nr_ring_pages;
+	bool			multi_ref;
 	/* All rings for this device. */
 	struct xen_blkif_ring	*rings;
 	unsigned int		nr_rings;
+	unsigned long		buffer_squeeze_end;
 };

 struct seg_buf {
@@ -327,7 +329,7 @@ struct seg_buf {
 };

 struct grant_page {
-	struct page 		*page;
+	struct page		*page;
 	struct persistent_gnt	*persistent_gnt;
 	grant_handle_t		handle;
 	grant_ref_t		gref;
@@ -358,9 +360,7 @@ struct pending_req {
 };


-#define vbd_sz(_v)	((_v)->bdev->bd_part ?			\
-			 (_v)->bdev->bd_part->nr_sects :	\
-			 get_capacity((_v)->bdev->bd_disk))
+#define vbd_sz(_v)	bdev_nr_sectors(file_bdev((_v)->bdev_file))

 #define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
 #define xen_blkif_put(_b)				\
@@ -375,13 +375,15 @@ struct phys_req {
 	struct block_device	*bdev;
 	blkif_sector_t		sector_number;
 };
+
 int xen_blkif_interface_init(void);
+void xen_blkif_interface_fini(void);

 int xen_blkif_xenbus_init(void);
+void xen_blkif_xenbus_fini(void);

 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
-int xen_blkif_purge_persistent(void *arg);
 void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -392,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 void xen_blkbk_unmap_purged_grants(struct work_struct *work);

-static inline void blkif_get_x86_32_req(struct blkif_request *dst,
-					struct blkif_x86_32_request *src)
-{
-	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = READ_ONCE(src->operation);
-	switch (dst->operation) {
-	case BLKIF_OP_READ:
-	case BLKIF_OP_WRITE:
-	case BLKIF_OP_WRITE_BARRIER:
-	case BLKIF_OP_FLUSH_DISKCACHE:
-		dst->u.rw.nr_segments = src->u.rw.nr_segments;
-		dst->u.rw.handle = src->u.rw.handle;
-		dst->u.rw.id = src->u.rw.id;
-		dst->u.rw.sector_number = src->u.rw.sector_number;
-		barrier();
-		if (n > dst->u.rw.nr_segments)
-			n = dst->u.rw.nr_segments;
-		for (i = 0; i < n; i++)
-			dst->u.rw.seg[i] = src->u.rw.seg[i];
-		break;
-	case BLKIF_OP_DISCARD:
-		dst->u.discard.flag = src->u.discard.flag;
-		dst->u.discard.id = src->u.discard.id;
-		dst->u.discard.sector_number = src->u.discard.sector_number;
-		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-		break;
-	case BLKIF_OP_INDIRECT:
-		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
-		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
-		dst->u.indirect.handle = src->u.indirect.handle;
-		dst->u.indirect.id = src->u.indirect.id;
-		dst->u.indirect.sector_number = src->u.indirect.sector_number;
-		barrier();
-		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
-		for (i = 0; i < j; i++)
-			dst->u.indirect.indirect_grefs[i] =
-				src->u.indirect.indirect_grefs[i];
-		break;
-	default:
-		/*
-		 * Don't know how to translate this op. Only get the
-		 * ID so failure can be reported to the frontend.
-		 */
-		dst->u.other.id = src->u.other.id;
-		break;
-	}
-}
-
-static inline void blkif_get_x86_64_req(struct blkif_request *dst,
-					struct blkif_x86_64_request *src)
-{
-	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = READ_ONCE(src->operation);
-	switch (dst->operation) {
-	case BLKIF_OP_READ:
-	case BLKIF_OP_WRITE:
-	case BLKIF_OP_WRITE_BARRIER:
-	case BLKIF_OP_FLUSH_DISKCACHE:
-		dst->u.rw.nr_segments = src->u.rw.nr_segments;
-		dst->u.rw.handle = src->u.rw.handle;
-		dst->u.rw.id = src->u.rw.id;
-		dst->u.rw.sector_number = src->u.rw.sector_number;
-		barrier();
-		if (n > dst->u.rw.nr_segments)
-			n = dst->u.rw.nr_segments;
-		for (i = 0; i < n; i++)
-			dst->u.rw.seg[i] = src->u.rw.seg[i];
-		break;
-	case BLKIF_OP_DISCARD:
-		dst->u.discard.flag = src->u.discard.flag;
-		dst->u.discard.id = src->u.discard.id;
-		dst->u.discard.sector_number = src->u.discard.sector_number;
-		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-		break;
-	case BLKIF_OP_INDIRECT:
-		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
-		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
-		dst->u.indirect.handle = src->u.indirect.handle;
-		dst->u.indirect.id = src->u.indirect.id;
-		dst->u.indirect.sector_number = src->u.indirect.sector_number;
-		barrier();
-		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
-		for (i = 0; i < j; i++)
-			dst->u.indirect.indirect_grefs[i] =
-				src->u.indirect.indirect_grefs[i];
-		break;
-	default:
-		/*
-		 * Don't know how to translate this op. Only get the
-		 * ID so failure can be reported to the frontend.
-		 */
-		dst->u.other.id = src->u.other.id;
-		break;
-	}
-}
-
 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
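Note on the free-page change above: the per-ring spinlock/counter/list triple is replaced by the generic gnttab_page_cache from <xen/grant_table.h>. The sketch below only illustrates how a caller would use that cache; it is not the actual blkback conversion (which lives in drivers/block/xen-blkback/blkback.c). The helper names example_get_free_page()/example_put_free_pages() and the GFP_NOIO fallback are assumptions made here for clarity, and the snippet presumes it is compiled in a unit that already includes this common.h.

	#include <linux/gfp.h>
	#include <xen/grant_table.h>

	/* Illustrative only: take a page for grant mapping, preferring the cache. */
	static int example_get_free_page(struct xen_blkif_ring *ring, struct page **page)
	{
		/* gnttab_page_cache_get() returns 0 when it hands back a cached page. */
		if (gnttab_page_cache_get(&ring->free_pages, page) == 0)
			return 0;

		/* Cache empty: fall back to a fresh allocation (GFP_NOIO assumed here). */
		*page = alloc_page(GFP_NOIO);
		return *page ? 0 : -ENOMEM;
	}

	/* Illustrative only: hand unmapped pages back to the per-ring cache. */
	static void example_put_free_pages(struct xen_blkif_ring *ring,
					   struct page **pages, unsigned int num)
	{
		gnttab_page_cache_put(&ring->free_pages, pages, num);
	}

The cache keeps its own lock and page count internally, and gnttab_page_cache_shrink(&ring->free_pages, num) trims it back under memory pressure, which is why the separate free_pages_lock, free_pages_num and free_pages list members could be dropped from struct xen_blkif_ring.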
