Diffstat (limited to 'drivers/block/xen-blkback/xenbus.c')
 -rw-r--r--  drivers/block/xen-blkback/xenbus.c | 310
 1 file changed, 186 insertions(+), 124 deletions(-)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e72c39..0621878940ae 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -1,24 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*  Xenbus code for blkif backend
     Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
     Copyright (C) 2005 XenSource Ltd
 
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
 */
 
 #define pr_fmt(fmt) "xen-blkback: " fmt
 
-#include <stdarg.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <linux/pagemap.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include "common.h"
@@ -89,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 	int i;
 
 	/* Not ready to connect? */
-	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
+	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
 		return;
 
 	/* Already connected? */
@@ -107,12 +99,12 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 		return;
 	}
 
-	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
+	err = sync_blockdev(file_bdev(blkif->vbd.bdev_file));
 	if (err) {
 		xenbus_dev_error(blkif->be->dev, err, "block flush");
 		return;
 	}
-	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
+	invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping);
 
 	for (i = 0; i < blkif->nr_rings; i++) {
 		ring = &blkif->rings[i];
@@ -152,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 		INIT_LIST_HEAD(&ring->pending_free);
 		INIT_LIST_HEAD(&ring->persistent_purge_list);
 		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
-		spin_lock_init(&ring->free_pages_lock);
-		INIT_LIST_HEAD(&ring->free_pages);
+		gnttab_page_cache_init(&ring->free_pages);
 		spin_lock_init(&ring->pending_free_lock);
 		init_waitqueue_head(&ring->pending_free_wq);
 
@@ -166,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 	return 0;
 }
 
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
@@ -179,6 +175,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->domid = domid;
 	atomic_set(&blkif->refcnt, 1);
 	init_completion(&blkif->drain_complete);
+
+	/*
+	 * Because freeing back to the cache may be deferred, it is not
+	 * safe to unload the module (and hence destroy the cache) until
+	 * this has completed. To prevent premature unloading, take an
+	 * extra module reference here and release only when the object
+	 * has been freed back to the cache.
+	 */
+	__module_get(THIS_MODULE);
 	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
 
 	return blkif;
@@ -189,6 +194,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 {
 	int err;
 	struct xen_blkif *blkif = ring->blkif;
+	const struct blkif_common_sring *sring_common;
+	RING_IDX rsp_prod, req_prod;
+	unsigned int size;
 
 	/* Already connected through? */
 	if (ring->irq)
@@ -199,46 +207,61 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 	if (err < 0)
 		return err;
 
+	sring_common = (struct blkif_common_sring *)ring->blk_ring;
+	rsp_prod = READ_ONCE(sring_common->rsp_prod);
+	req_prod = READ_ONCE(sring_common->req_prod);
+
 	switch (blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
 	{
-		struct blkif_sring *sring;
-		sring = (struct blkif_sring *)ring->blk_ring;
-		BACK_RING_INIT(&ring->blk_rings.native, sring,
-			       XEN_PAGE_SIZE * nr_grefs);
+		struct blkif_sring *sring_native =
+			(struct blkif_sring *)ring->blk_ring;
+
+		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
+				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_32:
 	{
-		struct blkif_x86_32_sring *sring_x86_32;
-		sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
-		BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
-			       XEN_PAGE_SIZE * nr_grefs);
+		struct blkif_x86_32_sring *sring_x86_32 =
+			(struct blkif_x86_32_sring *)ring->blk_ring;
+
+		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
+				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_64:
 	{
-		struct blkif_x86_64_sring *sring_x86_64;
-		sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
-		BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
-			       XEN_PAGE_SIZE * nr_grefs);
+		struct blkif_x86_64_sring *sring_x86_64 =
+			(struct blkif_x86_64_sring *)ring->blk_ring;
+
+		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
+				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
 		break;
 	}
 	default:
 		BUG();
 	}
 
-	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
-						    xen_blkif_be_int, 0,
-						    "blkif-backend", ring);
-	if (err < 0) {
-		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
-		ring->blk_rings.common.sring = NULL;
-		return err;
-	}
+	err = -EIO;
+	if (req_prod - rsp_prod > size)
+		goto fail;
+
+	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
+			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
+	if (err < 0)
+		goto fail;
 	ring->irq = err;
 
 	return 0;
+
+fail:
+	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+	ring->blk_rings.common.sring = NULL;
+	return err;
 }
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
@@ -256,6 +279,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
+			ring->xenblkd = NULL;
 			wake_up(&ring->shutdown_wq);
 		}
 
@@ -298,8 +322,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
 		BUG_ON(!list_empty(&ring->persistent_purge_list));
 		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
-		BUG_ON(!list_empty(&ring->free_pages));
-		BUG_ON(ring->free_pages_num != 0);
+		BUG_ON(ring->free_pages.num_pages != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
 		ring->active = false;
@@ -328,6 +351,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
+	module_put(THIS_MODULE);
 }
 
 int __init xen_blkif_interface_init(void)
@@ -341,6 +365,12 @@ int __init xen_blkif_interface_init(void)
 	return 0;
 }
 
+void xen_blkif_interface_fini(void)
+{
+	kmem_cache_destroy(xen_blkif_cachep);
+	xen_blkif_cachep = NULL;
+}
+
 /*
  * sysfs interface for VBD I/O requests
  */
@@ -440,12 +470,11 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
 	device_remove_file(&dev->dev, &dev_attr_physical_device);
 }
 
-
 static void xen_vbd_free(struct xen_vbd *vbd)
 {
-	if (vbd->bdev)
-		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
-	vbd->bdev = NULL;
+	if (vbd->bdev_file)
+		fput(vbd->bdev_file);
+	vbd->bdev_file = NULL;
 }
 
 static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@@ -453,8 +482,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 			  int cdrom)
 {
 	struct xen_vbd *vbd;
-	struct block_device *bdev;
-	struct request_queue *q;
+	struct file *bdev_file;
 
 	vbd = &blkif->vbd;
 	vbd->handle   = handle;
@@ -463,17 +491,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 
 	vbd->pdevice  = MKDEV(major, minor);
 
-	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
-				 FMODE_READ : FMODE_WRITE, NULL);
+	bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ?
+				 BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
 
-	if (IS_ERR(bdev)) {
+	if (IS_ERR(bdev_file)) {
 		pr_warn("xen_vbd_create: device %08x could not be opened\n",
 			vbd->pdevice);
 		return -ENOENT;
 	}
 
-	vbd->bdev = bdev;
-	if (vbd->bdev->bd_disk == NULL) {
+	vbd->bdev_file = bdev_file;
+	if (file_bdev(vbd->bdev_file)->bd_disk == NULL) {
 		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
 			vbd->pdevice);
 		xen_vbd_free(vbd);
@@ -481,23 +509,22 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 	}
 
 	vbd->size = vbd_sz(vbd);
 
-	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
+	if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
 		vbd->type |= VDISK_CDROM;
-	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+	if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
 		vbd->type |= VDISK_REMOVABLE;
 
-	q = bdev_get_queue(bdev);
-	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+	if (bdev_write_cache(file_bdev(bdev_file)))
 		vbd->flush_support = true;
-
-	if (q && blk_queue_secure_erase(q))
+	if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
 		vbd->discard_secure = true;
 
 	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
 		 handle, blkif->domid);
 	return 0;
 }
 
-static int xen_blkbk_remove(struct xenbus_device *dev)
+
+static void xen_blkbk_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -520,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		/* Put the reference we set in xen_blkif_alloc(). */
 		xen_blkif_put(be->blkif);
 	}
-
-	return 0;
 }
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -544,23 +569,22 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
 	struct xen_blkif *blkif = be->blkif;
 	int err;
 	int state = 0;
-	struct block_device *bdev = be->blkif->vbd.bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
+	struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file);
 
 	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
 		return;
 
-	if (blk_queue_discard(q)) {
+	if (bdev_max_discard_sectors(bdev)) {
 		err = xenbus_printf(xbt, dev->nodename,
 			"discard-granularity", "%u",
-			q->limits.discard_granularity);
+			bdev_discard_granularity(bdev));
 		if (err) {
 			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
 			return;
 		}
 		err = xenbus_printf(xbt, dev->nodename,
 			"discard-alignment", "%u",
-			q->limits.discard_alignment);
+			bdev_discard_alignment(bdev));
 		if (err) {
 			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
 			return;
@@ -580,6 +604,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
 	if (err)
 		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
 }
+
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
 		      struct backend_info *be, int state)
 {
@@ -642,7 +667,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 	/* setup back pointer */
 	be->blkif->be = be;
 
-	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
+	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
+				   backend_changed,
 				   "%s/%s", dev->nodename, "physical-device");
 	if (err)
 		goto fail;
@@ -664,7 +690,6 @@ fail:
 	return err;
 }
 
-
 /*
  * Callback received when the hotplug scripts have placed the physical-device
  * node. Read it and the mode node, and create a vbd. If the frontend is
@@ -756,7 +781,6 @@ static void backend_changed(struct xenbus_watch *watch,
 	}
 }
 
-
 /*
  * Callback received when the frontend's state changes.
 */
@@ -817,7 +841,7 @@ static void frontend_changed(struct xenbus_device *dev,
 		xenbus_switch_state(dev, XenbusStateClosed);
 		if (xenbus_dev_is_online(dev))
 			break;
-		/* fall through */
+		fallthrough;
 		/* if not online */
 	case XenbusStateUnknown:
 		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
@@ -831,9 +855,27 @@ static void frontend_changed(struct xenbus_device *dev,
 	}
 }
 
+/* Once a memory pressure is detected, squeeze free page pools for a while. */
+static unsigned int buffer_squeeze_duration_ms = 10;
+module_param_named(buffer_squeeze_duration_ms,
+		buffer_squeeze_duration_ms, int, 0644);
+MODULE_PARM_DESC(buffer_squeeze_duration_ms,
+"Duration in ms to squeeze pages buffer when a memory pressure is detected");
 
-/* ** Connection ** */
+/*
+ * Callback received when the memory pressure is detected.
+ */
+static void reclaim_memory(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+	if (!be)
+		return;
+	be->blkif->buffer_squeeze_end = jiffies +
+		msecs_to_jiffies(buffer_squeeze_duration_ms);
+}
 
+/* ** Connection ** */
 
 /*
  * Write the physical details regarding the block device to the store, and
@@ -862,7 +904,8 @@ again:
 
 	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
 
-	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+			be->blkif->vbd.feature_gnt_persistent_parm);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
 				 dev->nodename);
@@ -887,15 +930,16 @@ again:
 		goto abort;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
-			    (unsigned long)
-			    bdev_logical_block_size(be->blkif->vbd.bdev));
+			    (unsigned long)bdev_logical_block_size(
+				    file_bdev(be->blkif->vbd.bdev_file)));
 	if (err) {
 		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
 				 dev->nodename);
		goto abort;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
-			    bdev_physical_block_size(be->blkif->vbd.bdev));
+			    bdev_physical_block_size(
+				    file_bdev(be->blkif->vbd.bdev_file)));
 	if (err)
 		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
 				 dev->nodename);
@@ -926,7 +970,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	int err, i, j;
 	struct xen_blkif *blkif = ring->blkif;
 	struct xenbus_device *dev = blkif->be->dev;
-	unsigned int ring_page_order, nr_grefs, evtchn;
+	unsigned int nr_grefs, evtchn;
 
 	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
 			  &evtchn);
@@ -936,44 +980,35 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		return err;
 	}
 
-	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
-			  &ring_page_order);
-	if (err != 1) {
-		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
-		if (err != 1) {
-			err = -EINVAL;
-			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
-			return err;
-		}
-		nr_grefs = 1;
-	} else {
-		unsigned int i;
+	nr_grefs = blkif->nr_ring_pages;
 
-		if (ring_page_order > xen_blkif_max_ring_order) {
-			err = -EINVAL;
-			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
-					 dir, ring_page_order,
-					 xen_blkif_max_ring_order);
-			return err;
-		}
+	if (unlikely(!nr_grefs)) {
+		WARN_ON(true);
+		return -EINVAL;
+	}
 
-		nr_grefs = 1 << ring_page_order;
-		for (i = 0; i < nr_grefs; i++) {
-			char ring_ref_name[RINGREF_NAME_LEN];
+	for (i = 0; i < nr_grefs; i++) {
+		char ring_ref_name[RINGREF_NAME_LEN];
 
+		if (blkif->multi_ref)
 			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
-			err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
-					   "%u", &ring_ref[i]);
-			if (err != 1) {
-				err = -EINVAL;
-				xenbus_dev_fatal(dev, err, "reading %s/%s",
-						 dir, ring_ref_name);
-				return err;
-			}
+		else {
+			WARN_ON(i != 0);
+			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
+		}
+
+		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
+				   "%u", &ring_ref[i]);
+
+		if (err != 1) {
+			err = -EINVAL;
+			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
+			return err;
 		}
 	}
-	blkif->nr_ring_pages = nr_grefs;
 
+	err = -ENOMEM;
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
@@ -996,7 +1031,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-		return err;
+		goto fail;
 	}
 
 	return 0;
@@ -1016,42 +1051,45 @@ fail:
 		}
 		kfree(req);
 	}
-	return -ENOMEM;
-
+	return err;
 }
 
 static int connect_ring(struct backend_info *be)
 {
 	struct xenbus_device *dev = be->dev;
-	unsigned int pers_grants;
+	struct xen_blkif *blkif = be->blkif;
 	char protocol[64] = "";
 	int err, i;
 	char *xspath;
 	size_t xspathsize;
 	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
 	unsigned int requested_num_queues = 0;
+	unsigned int ring_page_order;
 
 	pr_debug("%s %s\n", __func__, dev->otherend);
 
-	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
+	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
 	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
 			   "%63s", protocol);
 	if (err <= 0)
 		strcpy(protocol, "unspecified, assuming default");
 	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
 	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
 	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
 	else {
 		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
 		return -ENOSYS;
 	}
-	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
-					   0);
-	be->blkif->vbd.feature_gnt_persistent = pers_grants;
-	be->blkif->vbd.overflow_max_grants = 0;
+
+	blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+	blkif->vbd.feature_gnt_persistent =
+		blkif->vbd.feature_gnt_persistent_parm &&
+		xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
+
+	blkif->vbd.overflow_max_grants = 0;
 
 	/*
 	 * Read the number of hardware queues from frontend.
@@ -1067,16 +1105,33 @@ static int connect_ring(struct backend_info *be)
 				 requested_num_queues, xenblk_max_queues);
 		return -ENOSYS;
 	}
-	be->blkif->nr_rings = requested_num_queues;
-	if (xen_blkif_alloc_rings(be->blkif))
+	blkif->nr_rings = requested_num_queues;
+	if (xen_blkif_alloc_rings(blkif))
 		return -ENOMEM;
 
 	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
-		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
-		 pers_grants ? "persistent grants" : "");
+		 blkif->nr_rings, blkif->blk_protocol, protocol,
+		 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
 
-	if (be->blkif->nr_rings == 1)
-		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+			   &ring_page_order);
+	if (err != 1) {
+		blkif->nr_ring_pages = 1;
+		blkif->multi_ref = false;
+	} else if (ring_page_order <= xen_blkif_max_ring_order) {
+		blkif->nr_ring_pages = 1 << ring_page_order;
+		blkif->multi_ref = true;
+	} else {
+		err = -EINVAL;
+		xenbus_dev_fatal(dev, err,
+				 "requested ring page order %d exceed max:%d",
+				 ring_page_order,
+				 xen_blkif_max_ring_order);
+		return err;
+	}
+
+	if (blkif->nr_rings == 1)
+		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
 	else {
 		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
 		xspath = kmalloc(xspathsize, GFP_KERNEL);
@@ -1085,10 +1140,10 @@ static int connect_ring(struct backend_info *be)
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < be->blkif->nr_rings; i++) {
+		for (i = 0; i < blkif->nr_rings; i++) {
 			memset(xspath, 0, xspathsize);
 			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
-			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
+			err = read_per_ring_refs(&blkif->rings[i], xspath);
 			if (err) {
 				kfree(xspath);
 				return err;
@@ -1108,10 +1163,17 @@ static struct xenbus_driver xen_blkbk_driver = {
 	.ids  = xen_blkbk_ids,
 	.probe = xen_blkbk_probe,
 	.remove = xen_blkbk_remove,
-	.otherend_changed = frontend_changed
+	.otherend_changed = frontend_changed,
+	.allow_rebind = true,
+	.reclaim_memory = reclaim_memory,
 };
 
 int xen_blkif_xenbus_init(void)
 {
 	return xenbus_register_backend(&xen_blkbk_driver);
 }
+
+void xen_blkif_xenbus_fini(void)
+{
+	xenbus_unregister_driver(&xen_blkbk_driver);
+}