Diffstat (limited to 'drivers/xen/gntdev.c')
-rw-r--r--  drivers/xen/gntdev.c | 320
1 file changed, 203 insertions, 117 deletions
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 59ffea800079..2c960f187f7c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -55,15 +56,25 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
+#define GNTDEV_COPY_BATCH 16
+
+struct gntdev_copy_batch {
+ struct gnttab_copy ops[GNTDEV_COPY_BATCH];
+ struct page *pages[GNTDEV_COPY_BATCH];
+ s16 __user *status[GNTDEV_COPY_BATCH];
+ unsigned int nr_ops;
+ unsigned int nr_pages;
+ bool writeable;
+ struct gntdev_copy_batch *next;
+};
+
static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
-static int use_ptemod;
-
-static int unmap_grant_pages(struct gntdev_grant_map *map,
- int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -120,6 +131,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
kvfree(map->unmap_ops);
kvfree(map->kmap_ops);
kvfree(map->kunmap_ops);
+ kvfree(map->being_removed);
kfree(map);
}
@@ -140,12 +152,15 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
GFP_KERNEL);
add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->being_removed =
+ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
- NULL == add->pages)
+ NULL == add->pages ||
+ NULL == add->being_removed)
goto err;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
GFP_KERNEL);
add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
@@ -193,7 +208,7 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->grants[i].ref = INVALID_GRANT_REF;
add->map_ops[i].handle = INVALID_GRANT_HANDLE;
add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
}
@@ -250,9 +265,39 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod)
+ if (map->pages && !xen_pv_domain()) {
+ /*
+ * Increment the reference count. This ensures that the
+ * subsequent call to unmap_grant_pages() will not wind up
+ * re-entering itself. It *can* wind up calling
+ * gntdev_put_map() recursively, but such calls will be with a
+ * reference count greater than 1, so they will return before
+ * this code is reached. The recursion depth is thus limited to
+ * 1. Do NOT use refcount_inc() here, as it will detect that
+ * the reference count is zero and WARN().
+ */
+ refcount_set(&map->users, 1);
+
+ /*
+ * Unmap the grants. This may or may not be asynchronous, so it
+ * is possible that the reference count is 1 on return, but it
+ * could also be greater than 1.
+ */
unmap_grant_pages(map, 0, map->count);
+ /* Check if the memory now needs to be freed */
+ if (!refcount_dec_and_test(&map->users))
+ return;
+
+ /*
+ * All pages have been returned to the hypervisor, so free the
+ * map.
+ */
+ }
+
+ if (xen_pv_domain() && map->notifier_init)
+ mmu_interval_notifier_remove(&map->notifier);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
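
The refcount manipulation above is subtle enough to warrant a standalone illustration. Below is a plain userspace C model of the pattern, not the driver code: the names are hypothetical, a bare int stands in for refcount_t, and the increment in teardown() stands in for the refcount_inc() that __unmap_grant_pages() takes before calling gnttab_unmap_refs_async().

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int users;              /* models refcount_t map->users */
    };

    static void obj_put(struct obj *o);

    /* Models unmap_grant_pages(): may "complete" by calling obj_put() again. */
    static void teardown(struct obj *o)
    {
            o->users++;             /* keep the object alive across the async work */
            /* ... asynchronous unmap would run here ... */
            obj_put(o);             /* completion path: count > 1, returns early */
    }

    static void obj_put(struct obj *o)
    {
            if (--o->users > 0)
                    return;

            /* Count hit 0: re-arm it so the recursion depth stays at 1. */
            o->users = 1;
            teardown(o);

            /* Free only if teardown() left no extra reference behind. */
            if (--o->users > 0)
                    return;

            printf("freeing object\n");
            free(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            o->users = 1;
            obj_put(o);             /* frees exactly once despite the nested put */
            return 0;
    }

The direct refcount_set() is what keeps refcount_t's zero-detection from firing; the cost is that the bounded recursion must be documented, which the comment above does.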
@@ -265,7 +310,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
- unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
(1 << _GNTMAP_guest_avail0);
u64 pte_maddr;
@@ -273,6 +318,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+ /* Note: this will perform a pte_mkspecial() through the hypercall. */
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
@@ -283,9 +329,10 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
+ size_t alloced = 0;
int i, err = 0;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
/* Note: it could already be mapped */
if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
return 0;
@@ -331,97 +378,130 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->count);
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status == GNTST_okay)
+ if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- else if (!err)
+ alloced++;
+ } else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
- if (use_ptemod) {
- if (map->kmap_ops[i].status == GNTST_okay)
+ if (xen_pv_domain()) {
+ if (map->kmap_ops[i].status == GNTST_okay) {
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
- else if (!err)
+ } else if (!err)
err = -EINVAL;
}
}
+ atomic_add(alloced, &map->live_grants);
return err;
}
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void __unmap_grant_pages_done(int result,
+ struct gntab_unmap_queue_data *data)
{
- int i, err = 0;
- struct gntab_unmap_queue_data unmap_data;
+ unsigned int i;
+ struct gntdev_grant_map *map = data->data;
+ unsigned int offset = data->unmap_ops - map->unmap_ops;
+ int successful_unmaps = 0;
+ int live_grants;
+
+ for (i = 0; i < data->count; i++) {
+ if (map->unmap_ops[offset + i].status == GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
+ WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ if (xen_pv_domain()) {
+ if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
+ WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
+ pr_debug("kunmap handle=%u st=%d\n",
+ map->kunmap_ops[offset+i].handle,
+ map->kunmap_ops[offset+i].status);
+ map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ }
+ }
+
+ /*
+ * Decrease the live-grant counter. This must happen after the loop to
+ * prevent premature reuse of the grants by gntdev_mmap().
+ */
+ live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+ if (WARN_ON(live_grants < 0))
+ pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+ __func__, live_grants, successful_unmaps);
+
+ /* Release reference taken by __unmap_grant_pages */
+ gntdev_put_map(NULL, map);
+}
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
+{
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
+
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
- unmap_data.unmap_ops = map->unmap_ops + offset;
- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
- unmap_data.pages = map->pages + offset;
- unmap_data.count = pages;
-
- err = gnttab_unmap_refs_sync(&unmap_data);
- if (err)
- return err;
+ map->unmap_data.unmap_ops = map->unmap_ops + offset;
+ map->unmap_data.kunmap_ops = xen_pv_domain() ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.pages = map->pages + offset;
+ map->unmap_data.count = pages;
+ map->unmap_data.done = __unmap_grant_pages_done;
+ map->unmap_data.data = map;
+ refcount_inc(&map->users); /* to keep map alive during async call below */
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- pr_debug("unmap handle=%d st=%d\n",
- map->unmap_ops[offset+i].handle,
- map->unmap_ops[offset+i].status);
- map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
- if (map->kunmap_ops[offset+i].status)
- err = -EINVAL;
- pr_debug("kunmap handle=%u st=%d\n",
- map->kunmap_ops[offset+i].handle,
- map->kunmap_ops[offset+i].status);
- map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
- }
- }
- return err;
+ gnttab_unmap_refs_async(&map->unmap_data);
}
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
- int range, err = 0;
+ int range;
+
+ if (atomic_read(&map->live_grants) == 0)
+ return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
- while (pages && !err) {
- while (pages &&
- map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
+ while (pages) {
+ while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset + range].handle ==
- INVALID_GRANT_HANDLE)
+ if (map->being_removed[offset + range])
break;
+ map->being_removed[offset + range] = true;
range++;
}
- err = __unmap_grant_pages(map, offset, range);
+ if (range)
+ __unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
-
- return err;
}
/* ------------------------------------------------------------------ */
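
The hole-skipping loop in unmap_grant_pages() splits a request into maximal runs of pages whose removal is not already in flight, claiming each page exactly once before handing the run off. A freestanding sketch of just that loop, with a hypothetical do_unmap() callback in place of __unmap_grant_pages():

    #include <stdbool.h>
    #include <stdio.h>

    static void do_unmap(int offset, int pages)
    {
            printf("unmap [%d, %d)\n", offset, offset + pages);
    }

    static void unmap_ranges(bool *being_removed, int offset, int pages)
    {
            while (pages) {
                    int range = 0;

                    /* Skip the hole: pages already being removed elsewhere. */
                    while (pages && being_removed[offset]) {
                            offset++;
                            pages--;
                    }
                    /* Claim a contiguous run so no page is unmapped twice. */
                    while (range < pages && !being_removed[offset + range]) {
                            being_removed[offset + range] = true;
                            range++;
                    }
                    if (range)
                            do_unmap(offset, range);
                    offset += range;
                    pages -= range;
            }
    }

    int main(void)
    {
            bool flags[8] = { false, false, true, true,
                              false, false, false, true };

            unmap_ranges(flags, 0, 8);      /* prints [0, 2) and [4, 7) */
            return 0;
    }

Marking being_removed[] before issuing the unmap is what lets the old handle == INVALID_GRANT_HANDLE test go away: with the unmap now asynchronous, the handle is cleared too late to serve as the claim.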
@@ -441,16 +521,12 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct gntdev_priv *priv = file->private_data;
pr_debug("gntdev_vma_close %p\n", vma);
- if (use_ptemod) {
- WARN_ON(map->vma != vma);
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
+
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
}
-static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+static struct page *gntdev_vma_find_normal_page(struct vm_area_struct *vma,
unsigned long addr)
{
struct gntdev_grant_map *map = vma->vm_private_data;
@@ -461,7 +537,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
static const struct vm_operations_struct gntdev_vmops = {
.open = gntdev_vma_open,
.close = gntdev_vma_close,
- .find_special_page = gntdev_vma_find_special_page,
+ .find_normal_page = gntdev_vma_find_normal_page,
};
/* ------------------------------------------------------------------ */
@@ -473,31 +549,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
- int err;
+ unsigned long map_start, map_end;
if (!mmu_notifier_range_blockable(range))
return false;
+ map_start = map->pages_vm_start;
+ map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
/*
* If the VMA is split or otherwise changed the notifier is not
* updated, but we don't want to process VA's outside the modified
* VMA. FIXME: It would be much more understandable to just prevent
* modifying the VMA in the first place.
*/
- if (map->vma->vm_start >= range->end ||
- map->vma->vm_end <= range->start)
+ if (map_start >= range->end || map_end <= range->start)
return true;
- mstart = max(range->start, map->vma->vm_start);
- mend = min(range->end, map->vma->vm_end);
+ mstart = max(range->start, map_start);
+ mend = min(range->end, map_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- range->start, range->end, mstart, mend);
- err = unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
+ map->index, map->count, map_start, map_end,
+ range->start, range->end, mstart, mend);
+ unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
return true;
}
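
The clamping in gntdev_invalidate() is ordinary interval intersection, now computed against the pages_vm_start/count recorded at mmap time rather than the (possibly split) VMA. A self-contained sketch with made-up addresses and a 4 KiB page size:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long map_start = 0x7f0000000000UL;
            unsigned long map_end   = map_start + (16UL << PAGE_SHIFT);
            /* Notifier range overlapping the tail of the mapping. */
            unsigned long inv_start = map_start + (4UL << PAGE_SHIFT);
            unsigned long inv_end   = map_start + (64UL << PAGE_SHIFT);

            if (inv_start >= map_end || inv_end <= map_start)
                    return 0;       /* no overlap: nothing to unmap */

            unsigned long mstart = inv_start > map_start ? inv_start : map_start;
            unsigned long mend   = inv_end < map_end ? inv_end : map_end;

            printf("unmap offset=%lu pages=%lu\n",
                   (mstart - map_start) >> PAGE_SHIFT,
                   (mend - mstart) >> PAGE_SHIFT);      /* offset=4 pages=12 */
            return 0;
    }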
@@ -519,6 +594,8 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
mutex_init(&priv->lock);
+ mutex_init(&priv->batch_lock);
+
#ifdef CONFIG_XEN_GNTDEV_DMABUF
priv->dmabuf_priv = gntdev_dmabuf_init(flip);
if (IS_ERR(priv->dmabuf_priv)) {
@@ -543,6 +620,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct gntdev_grant_map *map;
+ struct gntdev_copy_batch *batch;
pr_debug("priv %p\n", priv);
@@ -555,6 +633,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
}
mutex_unlock(&priv->lock);
+ mutex_lock(&priv->batch_lock);
+ while (priv->batch) {
+ batch = priv->batch;
+ priv->batch = batch->next;
+ kfree(batch);
+ }
+ mutex_unlock(&priv->batch_lock);
+
#ifdef CONFIG_XEN_GNTDEV_DMABUF
gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif
@@ -720,17 +806,6 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
return rc;
}
-#define GNTDEV_COPY_BATCH 16
-
-struct gntdev_copy_batch {
- struct gnttab_copy ops[GNTDEV_COPY_BATCH];
- struct page *pages[GNTDEV_COPY_BATCH];
- s16 __user *status[GNTDEV_COPY_BATCH];
- unsigned int nr_ops;
- unsigned int nr_pages;
- bool writeable;
-};
-
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
unsigned long *gfn)
{
@@ -888,36 +963,53 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
struct ioctl_gntdev_grant_copy copy;
- struct gntdev_copy_batch batch;
+ struct gntdev_copy_batch *batch;
unsigned int i;
int ret = 0;
if (copy_from_user(&copy, u, sizeof(copy)))
return -EFAULT;
- batch.nr_ops = 0;
- batch.nr_pages = 0;
+ mutex_lock(&priv->batch_lock);
+ if (!priv->batch) {
+ batch = kmalloc(sizeof(*batch), GFP_KERNEL);
+ } else {
+ batch = priv->batch;
+ priv->batch = batch->next;
+ }
+ mutex_unlock(&priv->batch_lock);
+ if (!batch)
+ return -ENOMEM;
+
+ batch->nr_ops = 0;
+ batch->nr_pages = 0;
for (i = 0; i < copy.count; i++) {
struct gntdev_grant_copy_segment seg;
if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
ret = -EFAULT;
+ gntdev_put_pages(batch);
goto out;
}
- ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
- if (ret < 0)
+ ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
+ if (ret < 0) {
+ gntdev_put_pages(batch);
goto out;
+ }
cond_resched();
}
- if (batch.nr_ops)
- ret = gntdev_copy(&batch);
- return ret;
+ if (batch->nr_ops)
+ ret = gntdev_copy(batch);
+
+ out:
+ mutex_lock(&priv->batch_lock);
+ batch->next = priv->batch;
+ priv->batch = batch;
+ mutex_unlock(&priv->batch_lock);
- out:
- gntdev_put_pages(&batch);
return ret;
}
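
The per-priv batch list amounts to a mutex-protected, singly-linked free list: pop a batch on entry, push it back on exit, and allocate only on a miss, so the GNTDEV_COPY_BATCH-sized arrays never land on the kernel stack. A rough POSIX userspace model (a pthread mutex stands in for priv->batch_lock; struct and function names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct batch {
            int ops[16];            /* stands in for the gnttab_copy array */
            struct batch *next;
    };

    static struct batch *free_list;
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct batch *batch_get(void)
    {
            struct batch *b;

            pthread_mutex_lock(&free_lock);
            b = free_list;
            if (b)
                    free_list = b->next;
            pthread_mutex_unlock(&free_lock);

            return b ? b : malloc(sizeof(*b));      /* allocate only on a miss */
    }

    static void batch_put(struct batch *b)
    {
            pthread_mutex_lock(&free_lock);
            b->next = free_list;
            free_list = b;
            pthread_mutex_unlock(&free_lock);
    }

    int main(void)
    {
            struct batch *b = batch_get();

            /* ... fill b->ops[] and issue the copies ... */
            batch_put(b);           /* recycled, not freed, for the next caller */
            return 0;
    }

In the driver the list is torn down in gntdev_release(), which plays the role a final free-list sweep would in the model above.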
@@ -945,7 +1037,7 @@ static long gntdev_ioctl(struct file *flip,
#ifdef CONFIG_XEN_GNTDEV_DMABUF
case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
- return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, ptr);
case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
@@ -977,22 +1069,23 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
return -EINVAL;
pr_debug("map %d+%d at %lx (pgoff %lx)\n",
- index, count, vma->vm_start, vma->vm_pgoff);
+ index, count, vma->vm_start, vma->vm_pgoff);
mutex_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (use_ptemod && map->vma)
+ if (!atomic_add_unless(&map->in_use, 1, 1))
goto unlock_out;
+
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
- if (use_ptemod)
- vma->vm_flags |= VM_DONTCOPY;
+ if (xen_pv_domain())
+ vm_flags_set(vma, VM_DONTCOPY);
vma->vm_private_data = map;
if (map->flags) {
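
atomic_add_unless(&map->in_use, 1, 1) increments the counter only if it is not already 1; since in_use here only ever holds 0 or 1, the call behaves as a one-shot 0 -> 1 latch, so exactly one mmap() can claim a given map. A C11 sketch of the equivalent compare-and-swap (claim() is a made-up name):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int in_use;

    /* Returns 1 if we claimed the map, 0 if it is already mapped. */
    static int claim(void)
    {
            int expected = 0;

            /* Succeeds only for the 0 -> 1 transition. */
            return atomic_compare_exchange_strong(&in_use, &expected, 1);
    }

    int main(void)
    {
            printf("first mmap:  %d\n", claim());   /* 1: claimed */
            printf("second mmap: %d\n", claim());   /* 0: already in use */
            return 0;
    }

This replaces the old map->vma != NULL test, which is no longer available once the driver stops caching the VMA pointer.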
@@ -1005,19 +1098,20 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
- if (use_ptemod) {
- map->vma = vma;
+ map->pages_vm_start = vma->vm_start;
+
+ if (xen_pv_domain()) {
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err) {
- map->vma = NULL;
+ if (err)
goto out_unlock_put;
- }
+
+ map->notifier_init = true;
}
mutex_unlock(&priv->lock);
- if (use_ptemod) {
+ if (xen_pv_domain()) {
/*
* gntdev takes the address of the PTE in find_grant_ptes() and
* passes it to the hypervisor in gntdev_map_grant_pages(). The
@@ -1030,7 +1124,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
*/
mmu_interval_read_begin(&map->notifier);
- map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1044,7 +1137,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
if (err)
goto out_put_map;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
@@ -1059,13 +1152,8 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod) {
+ if (xen_pv_domain())
unmap_grant_pages(map, 0, map->count);
- if (map->vma) {
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
- }
gntdev_put_map(priv, map);
return err;
}
@@ -1093,8 +1181,6 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
- use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
-
err = misc_register(&gntdev_miscdev);
if (err != 0) {
pr_err("Could not register gntdev device\n");