Diffstat (limited to 'lib/test_hmm.c')
-rw-r--r--  lib/test_hmm.c | 1086
1 file changed, 869 insertions(+), 217 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 28528285942c..8af169d3873a 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
@@ -25,18 +26,41 @@
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
+#include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
#include "test_hmm_uapi.h"
-#define DMIRROR_NDEVICES 2
+#define DMIRROR_NDEVICES 4
#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
#define DEVMEM_CHUNKS_RESERVE 16
+/*
+ * For device_private pages, dpage is just a dummy struct page
+ * representing a piece of device memory. dmirror_devmem_alloc_page
+ * allocates a real system memory page as backing storage to fake a
+ * real device. zone_device_data points to that backing page. But
+ * for device_coherent memory, the struct page represents real
+ * physical CPU-accessible memory that we can use directly.
+ */
+#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
+ (page)->zone_device_data : (page))
+
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+ "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+ "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;
-static struct page *dmirror_zero_page;
struct dmirror_device;
@@ -47,6 +71,7 @@ struct dmirror_bounce {
unsigned long cpages;
};
+#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL
/*
@@ -67,6 +92,7 @@ struct dmirror {
struct xarray pt;
struct mmu_interval_notifier notifier;
struct mutex mutex;
+ __u64 flags;
};
/*
@@ -75,6 +101,7 @@ struct dmirror {
struct dmirror_chunk {
struct dev_pagemap pagemap;
struct dmirror_device *mdevice;
+ bool remove;
};
/*
@@ -82,7 +109,8 @@ struct dmirror_chunk {
*/
struct dmirror_device {
struct cdev cdevice;
- struct hmm_devmem *devmem;
+ unsigned int zone_device_type;
+ struct device device;
unsigned int devmem_capacity;
unsigned int devmem_count;
@@ -92,6 +120,7 @@ struct dmirror_device {
unsigned long calloc;
unsigned long cfree;
struct page *free_pages;
+ struct folio *free_folios;
spinlock_t lock; /* protects the above */
};
@@ -110,6 +139,21 @@ static int dmirror_bounce_init(struct dmirror_bounce *bounce,
return 0;
}
+static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
+{
+ return (mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE);
+}
+
+static enum migrate_vma_direction
+dmirror_select_device(struct dmirror *dmirror)
+{
+ return (dmirror->mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ?
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE :
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+}
+
static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
{
vfree(bounce->ptr);
@@ -151,11 +195,16 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
+{
+ return container_of(page_pgmap(page), struct dmirror_chunk,
+ pagemap);
+}
+
static struct dmirror_device *dmirror_page_to_device(struct page *page)
{
- return container_of(page->pgmap, struct dmirror_chunk,
- pagemap)->mdevice;
+ return dmirror_page_to_chunk(page)->mdevice;
}
static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
@@ -214,6 +263,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
{
struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+ /*
+ * Ignore invalidation callbacks for device private pages since
+ * the invalidation is handled as part of the migration process.
+ */
+ if (range->event == MMU_NOTIFY_MIGRATE &&
+ range->owner == dmirror->mdevice)
+ return true;
+
if (mmu_notifier_range_blockable(range))
mutex_lock(&dmirror->mutex);
else if (!mutex_trylock(&dmirror->mutex))
@@ -275,7 +332,7 @@ static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
{
struct mm_struct *mm = dmirror->notifier.mm;
unsigned long addr;
- unsigned long pfns[64];
+ unsigned long pfns[32];
struct hmm_range range = {
.notifier = &dmirror->notifier,
.hmm_pfns = pfns,
@@ -314,16 +371,13 @@ static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;
struct page *page;
- void *tmp;
entry = xa_load(&dmirror->pt, pfn);
page = xa_untag_pointer(entry);
if (!page)
return -ENOENT;
- tmp = kmap(page);
- memcpy(ptr, tmp, PAGE_SIZE);
- kunmap(page);
+ memcpy_from_page(ptr, page, 0, PAGE_SIZE);
ptr += PAGE_SIZE;
bounce->cpages++;
@@ -383,16 +437,13 @@ static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;
struct page *page;
- void *tmp;
entry = xa_load(&dmirror->pt, pfn);
page = xa_untag_pointer(entry);
if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
return -ENOENT;
- tmp = kmap(page);
- memcpy(tmp, ptr, PAGE_SIZE);
- kunmap(page);
+ memcpy_to_page(page, 0, ptr, PAGE_SIZE);
ptr += PAGE_SIZE;
bounce->cpages++;
@@ -442,15 +493,47 @@ fini:
return ret;
}
-static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
- struct page **ppage)
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
+ struct page **ppage, bool is_large)
{
struct dmirror_chunk *devmem;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long pfn;
unsigned long pfn_first;
unsigned long pfn_last;
void *ptr;
+ int ret = -ENOMEM;
+
+ devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return ret;
+
+ switch (mdevice->zone_device_type) {
+ case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR_OR_NULL(res))
+ goto err_devmem;
+ devmem->pagemap.range.start = res->start;
+ devmem->pagemap.range.end = res->end;
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ break;
+ case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+ devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+ spm_addr_dev0 :
+ spm_addr_dev1;
+ devmem->pagemap.range.end = devmem->pagemap.range.start +
+ DEVMEM_CHUNK_SIZE - 1;
+ devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_devmem;
+ }
+
+ devmem->pagemap.nr_range = 1;
+ devmem->pagemap.ops = &dmirror_devmem_ops;
+ devmem->pagemap.owner = mdevice;
mutex_lock(&mdevice->devmem_lock);
@@ -464,33 +547,22 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
sizeof(new_chunks[0]) * new_capacity,
GFP_KERNEL);
if (!new_chunks)
- goto err;
+ goto err_release;
mdevice->devmem_capacity = new_capacity;
mdevice->devmem_chunks = new_chunks;
}
-
- res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
- "hmm_dmirror");
- if (IS_ERR(res))
- goto err;
-
- devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
- if (!devmem)
- goto err_release;
-
- devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- devmem->pagemap.res = *res;
- devmem->pagemap.ops = &dmirror_devmem_ops;
- devmem->pagemap.owner = mdevice;
-
ptr = memremap_pages(&devmem->pagemap, numa_node_id());
- if (IS_ERR(ptr))
- goto err_free;
+ if (IS_ERR_OR_NULL(ptr)) {
+ if (ptr)
+ ret = PTR_ERR(ptr);
+ else
+ ret = -EFAULT;
+ goto err_release;
+ }
devmem->mdevice = mdevice;
- pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
- pfn_last = pfn_first +
- (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+ pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+ pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
mutex_unlock(&mdevice->devmem_lock);
@@ -502,108 +574,174 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
pfn_first, pfn_last);
spin_lock(&mdevice->lock);
- for (pfn = pfn_first; pfn < pfn_last; pfn++) {
+ for (pfn = pfn_first; pfn < pfn_last; ) {
struct page *page = pfn_to_page(pfn);
+ if (is_large && IS_ALIGNED(pfn, HPAGE_PMD_NR)
+ && (pfn + HPAGE_PMD_NR <= pfn_last)) {
+ page->zone_device_data = mdevice->free_folios;
+ mdevice->free_folios = page_folio(page);
+ pfn += HPAGE_PMD_NR;
+ continue;
+ }
+
page->zone_device_data = mdevice->free_pages;
mdevice->free_pages = page;
+ pfn++;
}
+
+ ret = 0;
if (ppage) {
- *ppage = mdevice->free_pages;
- mdevice->free_pages = (*ppage)->zone_device_data;
- mdevice->calloc++;
+ if (is_large) {
+ if (!mdevice->free_folios) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+ *ppage = folio_page(mdevice->free_folios, 0);
+ mdevice->free_folios = (*ppage)->zone_device_data;
+ mdevice->calloc += HPAGE_PMD_NR;
+ } else if (mdevice->free_pages) {
+ *ppage = mdevice->free_pages;
+ mdevice->free_pages = (*ppage)->zone_device_data;
+ mdevice->calloc++;
+ } else {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
}
+err_unlock:
spin_unlock(&mdevice->lock);
- return true;
+ return ret;
-err_free:
- kfree(devmem);
err_release:
- release_mem_region(devmem->pagemap.res.start,
- resource_size(&devmem->pagemap.res));
-err:
mutex_unlock(&mdevice->devmem_lock);
- return false;
+ if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+err_devmem:
+ kfree(devmem);
+
+ return ret;
}
-static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
+static struct page *dmirror_devmem_alloc_page(struct dmirror *dmirror,
+ bool is_large)
{
struct page *dpage = NULL;
- struct page *rpage;
+ struct page *rpage = NULL;
+ unsigned int order = is_large ? HPAGE_PMD_ORDER : 0;
+ struct dmirror_device *mdevice = dmirror->mdevice;
/*
- * This is a fake device so we alloc real system memory to store
- * our device memory.
+ * For ZONE_DEVICE private type, this is a fake device so we allocate
+ * real system memory to store our device memory.
+ * For ZONE_DEVICE coherent type we use the actual dpage to store the
+ * data and ignore rpage.
*/
- rpage = alloc_page(GFP_HIGHUSER);
- if (!rpage)
- return NULL;
-
+ if (dmirror_is_private_zone(mdevice)) {
+ rpage = folio_page(folio_alloc(GFP_HIGHUSER, order), 0);
+ if (!rpage)
+ return NULL;
+ }
spin_lock(&mdevice->lock);
- if (mdevice->free_pages) {
+ if (is_large && mdevice->free_folios) {
+ dpage = folio_page(mdevice->free_folios, 0);
+ mdevice->free_folios = dpage->zone_device_data;
+ mdevice->calloc += 1 << order;
+ spin_unlock(&mdevice->lock);
+ } else if (!is_large && mdevice->free_pages) {
dpage = mdevice->free_pages;
mdevice->free_pages = dpage->zone_device_data;
mdevice->calloc++;
spin_unlock(&mdevice->lock);
} else {
spin_unlock(&mdevice->lock);
- if (!dmirror_allocate_chunk(mdevice, &dpage))
+ if (dmirror_allocate_chunk(mdevice, &dpage, is_large))
goto error;
}
+ zone_device_folio_init(page_folio(dpage), order);
dpage->zone_device_data = rpage;
- get_page(dpage);
- lock_page(dpage);
return dpage;
error:
- __free_page(rpage);
+ if (rpage)
+ __free_pages(rpage, order);
return NULL;
}
static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
struct dmirror *dmirror)
{
- struct dmirror_device *mdevice = dmirror->mdevice;
const unsigned long *src = args->src;
unsigned long *dst = args->dst;
unsigned long addr;
- for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
- src++, dst++) {
+ for (addr = args->start; addr < args->end; ) {
struct page *spage;
struct page *dpage;
struct page *rpage;
+ bool is_large = *src & MIGRATE_PFN_COMPOUND;
+ int write = (*src & MIGRATE_PFN_WRITE) ? MIGRATE_PFN_WRITE : 0;
+ unsigned long nr = 1;
if (!(*src & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
/*
* Note that spage might be NULL which is OK since it is an
* unallocated pte_none() or read-only zero page.
*/
spage = migrate_pfn_to_page(*src);
-
- /*
- * Don't migrate device private pages from our own driver or
- * others. For our own we would do a device private memory copy
- * not a migration and for others, we would need to fault the
- * other device's page into system memory first.
- */
- if (spage && is_zone_device_page(spage))
+ if (WARN(spage && is_zone_device_page(spage),
+ "page already in device spage pfn: 0x%lx\n",
+ page_to_pfn(spage)))
+ goto next;
+
+ if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+ dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+ dpage = NULL;
+ } else
+ dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+
+ if (!dpage) {
+ struct folio *folio;
+ unsigned long i;
+ unsigned long spfn = *src >> MIGRATE_PFN_SHIFT;
+ struct page *src_page;
+
+ if (!is_large)
+ goto next;
+
+ if (!spage && is_large) {
+ nr = HPAGE_PMD_NR;
+ } else {
+ folio = page_folio(spage);
+ nr = folio_nr_pages(folio);
+ }
+
+ for (i = 0; i < nr && addr < args->end; i++) {
+ dpage = dmirror_devmem_alloc_page(dmirror, false);
+ rpage = BACKING_PAGE(dpage);
+ rpage->zone_device_data = dmirror;
+
+ *dst = migrate_pfn(page_to_pfn(dpage)) | write;
+ src_page = pfn_to_page(spfn + i);
+
+ if (spage)
+ copy_highpage(rpage, src_page);
+ else
+ clear_highpage(rpage);
+ src++;
+ dst++;
+ addr += PAGE_SIZE;
+ }
continue;
+ }
- dpage = dmirror_devmem_alloc_page(mdevice);
- if (!dpage)
- continue;
-
- rpage = dpage->zone_device_data;
- if (spage)
- copy_highpage(rpage, spage);
- else
- clear_highpage(rpage);
+ rpage = BACKING_PAGE(dpage);
/*
* Normally, a device would use the page->zone_device_data to
@@ -613,14 +751,82 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
*/
rpage->zone_device_data = dmirror;
- *dst = migrate_pfn(page_to_pfn(dpage)) |
- MIGRATE_PFN_LOCKED;
- if ((*src & MIGRATE_PFN_WRITE) ||
- (!spage && args->vma->vm_flags & VM_WRITE))
- *dst |= MIGRATE_PFN_WRITE;
+ pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+
+ *dst = migrate_pfn(page_to_pfn(dpage)) | write;
+
+ if (is_large) {
+ int i;
+ struct folio *folio = page_folio(dpage);
+ *dst |= MIGRATE_PFN_COMPOUND;
+
+ if (folio_test_large(folio)) {
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ struct page *dst_page =
+ pfn_to_page(page_to_pfn(rpage) + i);
+ struct page *src_page =
+ pfn_to_page(page_to_pfn(spage) + i);
+
+ if (spage)
+ copy_highpage(dst_page, src_page);
+ else
+ clear_highpage(dst_page);
+ src++;
+ dst++;
+ addr += PAGE_SIZE;
+ }
+ continue;
+ }
+ }
+
+ if (spage)
+ copy_highpage(rpage, spage);
+ else
+ clear_highpage(rpage);
+
+next:
+ src++;
+ dst++;
+ addr += PAGE_SIZE;
}
}
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+ struct dmirror *dmirror)
+{
+ void *entry;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return 0;
+}
+
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
struct dmirror *dmirror)
{
@@ -629,14 +835,17 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
const unsigned long *src = args->src;
const unsigned long *dst = args->dst;
unsigned long pfn;
+ const unsigned long start_pfn = start >> PAGE_SHIFT;
+ const unsigned long end_pfn = end >> PAGE_SHIFT;
/* Map the migrated pages into the device's page tables. */
mutex_lock(&dmirror->mutex);
- for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
- src++, dst++) {
+ for (pfn = start_pfn; pfn < end_pfn; pfn++, src++, dst++) {
struct page *dpage;
void *entry;
+ int nr, i;
+ struct page *rpage;
if (!(*src & MIGRATE_PFN_MIGRATE))
continue;
@@ -645,17 +854,25 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
if (!dpage)
continue;
- /*
- * Store the page that holds the data so the page table
- * doesn't have to deal with ZONE_DEVICE private pages.
- */
- entry = dpage->zone_device_data;
- if (*dst & MIGRATE_PFN_WRITE)
- entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
- entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
- if (xa_is_err(entry)) {
- mutex_unlock(&dmirror->mutex);
- return xa_err(entry);
+ if (*dst & MIGRATE_PFN_COMPOUND)
+ nr = folio_nr_pages(page_folio(dpage));
+ else
+ nr = 1;
+
+ WARN_ON_ONCE(end_pfn < start_pfn + nr);
+
+ rpage = BACKING_PAGE(dpage);
+ VM_WARN_ON(folio_nr_pages(page_folio(rpage)) != nr);
+
+ for (i = 0; i < nr; i++) {
+ entry = folio_page(page_folio(rpage), i);
+ if (*dst & MIGRATE_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ entry = xa_store(&dmirror->pt, pfn + i, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
}
}
@@ -663,19 +880,238 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
return 0;
}
-static int dmirror_migrate(struct dmirror *dmirror,
- struct hmm_dmirror_cmd *cmd)
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct dmirror_bounce bounce;
+ int ret = 0;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; !ret && addr < end; addr += PAGE_SIZE) {
+ struct folio *folio;
+ struct page *page;
+
+ page = make_device_exclusive(mm, addr, NULL, &folio);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ break;
+ }
+
+ ret = dmirror_atomic_map(addr, page, dmirror);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ if (ret)
+ return ret;
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ unsigned long addr;
+ unsigned int order = 0;
+ int i;
+
+ for (addr = start; addr < end; ) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(*src);
+ if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) {
+ addr += PAGE_SIZE;
+ goto next;
+ }
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage))) {
+ addr += PAGE_SIZE;
+ goto next;
+ }
+
+ spage = BACKING_PAGE(spage);
+ order = folio_order(page_folio(spage));
+ if (order)
+ *dst = MIGRATE_PFN_COMPOUND;
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+
+ if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+ dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+ *dst &= ~MIGRATE_PFN_COMPOUND;
+ dpage = NULL;
+ } else if (order) {
+ dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER_MOVABLE,
+ order, args->vma, addr), 0);
+ } else {
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ }
+
+ if (!dpage && !order)
+ return VM_FAULT_OOM;
+
+ pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+
+ if (dpage) {
+ lock_page(dpage);
+ *dst |= migrate_pfn(page_to_pfn(dpage));
+ }
+
+ for (i = 0; i < (1 << order); i++) {
+ struct page *src_page;
+ struct page *dst_page;
+
+ /* Try with smaller pages if large allocation fails */
+ if (!dpage && order) {
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ lock_page(dpage);
+ dst[i] = migrate_pfn(page_to_pfn(dpage));
+ dst_page = pfn_to_page(page_to_pfn(dpage));
+ dpage = NULL; /* For the next iteration */
+ } else {
+ dst_page = pfn_to_page(page_to_pfn(dpage) + i);
+ }
+
+ src_page = pfn_to_page(page_to_pfn(spage) + i);
+
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+ addr += PAGE_SIZE;
+ copy_highpage(dst_page, src_page);
+ }
+next:
+ src += 1 << order;
+ dst += 1 << order;
+ }
+ return 0;
+}
+
+static unsigned long
+dmirror_successful_migrated_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
+static int dmirror_migrate_to_system(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ struct migrate_vma args = { 0 };
+ unsigned long next;
+ int ret;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+ src_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ cmd->cpages = 0;
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (PTRS_PER_PTE << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror) | MIGRATE_VMA_SELECT_COMPOUND;
+
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from device mem to sys mem\n");
+ dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+
+ migrate_vma_pages(&args);
+ cmd->cpages += dmirror_successful_migrated_pages(&args);
+ migrate_vma_finalize(&args);
+ }
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
+
+ return ret;
+}
+
+static int dmirror_migrate_to_device(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
{
unsigned long start, end, addr;
unsigned long size = cmd->npages << PAGE_SHIFT;
struct mm_struct *mm = dmirror->notifier.mm;
struct vm_area_struct *vma;
- unsigned long src_pfns[64];
- unsigned long dst_pfns[64];
struct dmirror_bounce bounce;
- struct migrate_vma args;
+ struct migrate_vma args = { 0 };
unsigned long next;
int ret;
+ unsigned long *src_pfns = NULL;
+ unsigned long *dst_pfns = NULL;
start = cmd->addr;
end = start + size;
@@ -686,15 +1122,26 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (!mmget_not_zero(mm))
return -EINVAL;
+ ret = -ENOMEM;
+ src_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*src_pfns),
+ GFP_KERNEL | __GFP_NOFAIL);
+ if (!src_pfns)
+ goto free_mem;
+
+ dst_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*dst_pfns),
+ GFP_KERNEL | __GFP_NOFAIL);
+ if (!dst_pfns)
+ goto free_mem;
+
+ ret = 0;
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start ||
- !(vma->vm_flags & VM_READ)) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
ret = -EINVAL;
goto out;
}
- next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ next = min(end, addr + (PTRS_PER_PTE << PAGE_SHIFT));
if (next > vma->vm_end)
next = vma->vm_end;
@@ -703,11 +1150,14 @@ static int dmirror_migrate(struct dmirror *dmirror,
args.dst = dst_pfns;
args.start = addr;
args.end = next;
- args.src_owner = NULL;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = MIGRATE_VMA_SELECT_SYSTEM |
+ MIGRATE_VMA_SELECT_COMPOUND;
ret = migrate_vma_setup(&args);
if (ret)
goto out;
+ pr_debug("Migrating from sys mem to device mem\n");
dmirror_migrate_alloc_and_copy(&args, dmirror);
migrate_vma_pages(&args);
dmirror_migrate_finalize_and_map(&args, dmirror);
@@ -716,10 +1166,13 @@ static int dmirror_migrate(struct dmirror *dmirror,
mmap_read_unlock(mm);
mmput(mm);
- /* Return the migrated data for verification. */
+ /*
+ * Return the migrated data for verification.
+ * Only for pages in device zone
+ */
ret = dmirror_bounce_init(&bounce, start, size);
if (ret)
- return ret;
+ goto free_mem;
mutex_lock(&dmirror->mutex);
ret = dmirror_do_read(dmirror, start, end, &bounce);
mutex_unlock(&dmirror->mutex);
@@ -730,11 +1183,14 @@ static int dmirror_migrate(struct dmirror *dmirror,
}
cmd->cpages = bounce.cpages;
dmirror_bounce_fini(&bounce);
- return ret;
+ goto free_mem;
out:
mmap_read_unlock(mm);
mmput(mm);
+free_mem:
+ kfree(src_pfns);
+ kfree(dst_pfns);
return ret;
}
@@ -759,6 +1215,12 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
else
*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
+ } else if (is_device_coherent_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE;
} else if (is_zero_pfn(page_to_pfn(page)))
*perm = HMM_DMIRROR_PROT_ZERO;
else
@@ -767,6 +1229,10 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
*perm |= HMM_DMIRROR_PROT_WRITE;
else
*perm |= HMM_DMIRROR_PROT_READ;
+ if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PMD;
+ else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PUD;
}
static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
@@ -861,8 +1327,8 @@ static int dmirror_snapshot(struct dmirror *dmirror,
unsigned long size = cmd->npages << PAGE_SHIFT;
unsigned long addr;
unsigned long next;
- unsigned long pfns[64];
- unsigned char perm[64];
+ unsigned long pfns[32];
+ unsigned char perm[32];
char __user *uptr;
struct hmm_range range = {
.hmm_pfns = pfns,
@@ -909,6 +1375,104 @@ static int dmirror_snapshot(struct dmirror *dmirror,
return ret;
}
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+ unsigned int order = 0;
+
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+
+ order = folio_order(page_folio(spage));
+ spage = BACKING_PAGE(spage);
+ if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+ dpage = folio_page(folio_alloc(GFP_HIGHUSER_MOVABLE,
+ order), 0);
+ } else {
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ order = 0;
+ }
+
+ /* TODO Support splitting here */
+ lock_page(dpage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ if (order)
+ dst_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ folio_copy(page_folio(dpage), page_folio(spage));
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
+}
+
+/* Removes free pages from the free list so they can't be re-allocated */
+static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+{
+ struct dmirror_device *mdevice = devmem->mdevice;
+ struct page *page;
+ struct folio *folio;
+
+
+ for (folio = mdevice->free_folios; folio; folio = folio_zone_device_data(folio))
+ if (dmirror_page_to_chunk(folio_page(folio, 0)) == devmem)
+ mdevice->free_folios = folio_zone_device_data(folio);
+ for (page = mdevice->free_pages; page; page = page->zone_device_data)
+ if (dmirror_page_to_chunk(page) == devmem)
+ mdevice->free_pages = page->zone_device_data;
+}
+
+static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
+{
+ unsigned int i;
+
+ mutex_lock(&mdevice->devmem_lock);
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ spin_lock(&mdevice->lock);
+ devmem->remove = true;
+ dmirror_remove_free_pages(devmem);
+ spin_unlock(&mdevice->lock);
+
+ dmirror_device_evict_chunk(devmem);
+ memunmap_pages(&devmem->pagemap);
+ if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+ kfree(devmem);
+ }
+ mdevice->devmem_count = 0;
+ mdevice->devmem_capacity = 0;
+ mdevice->free_pages = NULL;
+ mdevice->free_folios = NULL;
+ kfree(mdevice->devmem_chunks);
+ mdevice->devmem_chunks = NULL;
+ }
+ mutex_unlock(&mdevice->devmem_lock);
+}
+
static long dmirror_fops_unlocked_ioctl(struct file *filp,
unsigned int command,
unsigned long arg)
@@ -942,14 +1506,36 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_write(dmirror, &cmd);
break;
- case HMM_DMIRROR_MIGRATE:
- ret = dmirror_migrate(dmirror, &cmd);
+ case HMM_DMIRROR_MIGRATE_TO_DEV:
+ ret = dmirror_migrate_to_device(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_SYS:
+ ret = dmirror_migrate_to_system(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
break;
case HMM_DMIRROR_SNAPSHOT:
ret = dmirror_snapshot(dmirror, &cmd);
break;
+ case HMM_DMIRROR_RELEASE:
+ dmirror_device_remove_chunks(dmirror->mdevice);
+ ret = 0;
+ break;
+ case HMM_DMIRROR_FLAGS:
+ dmirror->flags = cmd.npages;
+ ret = 0;
+ break;
+
default:
return -EINVAL;
}
@@ -962,111 +1548,180 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
return 0;
}
+static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+
+ ret = vm_insert_page(vma, addr, page);
+ if (ret) {
+ __free_page(page);
+ return ret;
+ }
+ put_page(page);
+ }
+
+ return 0;
+}
+
static const struct file_operations dmirror_fops = {
.open = dmirror_fops_open,
.release = dmirror_fops_release,
+ .mmap = dmirror_fops_mmap,
.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
.llseek = default_llseek,
.owner = THIS_MODULE,
};
-static void dmirror_devmem_free(struct page *page)
+static void dmirror_devmem_free(struct folio *folio)
{
- struct page *rpage = page->zone_device_data;
+ struct page *page = &folio->page;
+ struct page *rpage = BACKING_PAGE(page);
struct dmirror_device *mdevice;
+ struct folio *rfolio = page_folio(rpage);
+ unsigned int order = folio_order(rfolio);
- if (rpage)
- __free_page(rpage);
+ if (rpage != page) {
+ if (order)
+ __free_pages(rpage, order);
+ else
+ __free_page(rpage);
+ rpage = NULL;
+ }
mdevice = dmirror_page_to_device(page);
-
spin_lock(&mdevice->lock);
- mdevice->cfree++;
- page->zone_device_data = mdevice->free_pages;
- mdevice->free_pages = page;
- spin_unlock(&mdevice->lock);
-}
-static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
- struct dmirror_device *mdevice)
-{
- const unsigned long *src = args->src;
- unsigned long *dst = args->dst;
- unsigned long start = args->start;
- unsigned long end = args->end;
- unsigned long addr;
-
- for (addr = start; addr < end; addr += PAGE_SIZE,
- src++, dst++) {
- struct page *dpage, *spage;
-
- spage = migrate_pfn_to_page(*src);
- if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
- continue;
- spage = spage->zone_device_data;
-
- dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
- if (!dpage)
- continue;
-
- lock_page(dpage);
- copy_highpage(dpage, spage);
- *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
- if (*src & MIGRATE_PFN_WRITE)
- *dst |= MIGRATE_PFN_WRITE;
+ /* Return page to our allocator if not freeing the chunk */
+ if (!dmirror_page_to_chunk(page)->remove) {
+ mdevice->cfree += 1 << order;
+ if (order) {
+ page->zone_device_data = mdevice->free_folios;
+ mdevice->free_folios = page_folio(page);
+ } else {
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
}
- return 0;
-}
-
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
- struct dmirror *dmirror)
-{
- /* Invalidate the device's page table mapping. */
- mutex_lock(&dmirror->mutex);
- dmirror_do_update(dmirror, args->start, args->end);
- mutex_unlock(&dmirror->mutex);
+ spin_unlock(&mdevice->lock);
}
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
- struct migrate_vma args;
- unsigned long src_pfns;
- unsigned long dst_pfns;
+ struct migrate_vma args = { 0 };
struct page *rpage;
struct dmirror *dmirror;
- vm_fault_t ret;
+ vm_fault_t ret = 0;
+ unsigned int order, nr;
/*
* Normally, a device would use the page->zone_device_data to point to
* the mirror but here we use it to hold the page for the simulated
* device memory and that page holds the pointer to the mirror.
*/
- rpage = vmf->page->zone_device_data;
+ rpage = folio_zone_device_data(page_folio(vmf->page));
dmirror = rpage->zone_device_data;
/* FIXME demonstrate how we can adjust migrate range */
+ order = folio_order(page_folio(vmf->page));
+ nr = 1 << order;
+
+ /*
+ * When folios are partially mapped, we can't rely on the folio
+ * order of vmf->page as the folio might not be fully split yet
+ */
+ if (vmf->pte) {
+ order = 0;
+ nr = 1;
+ }
+
+ /*
+ * Consider a per-cpu cache of src and dst pfns, but with
+ * large number of cpus that might not scale well.
+ */
+ args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
args.vma = vmf->vma;
- args.start = vmf->address;
- args.end = args.start + PAGE_SIZE;
- args.src = &src_pfns;
- args.dst = &dst_pfns;
- args.src_owner = dmirror->mdevice;
+ args.end = args.start + (PAGE_SIZE << order);
+
+ nr = (args.end - args.start) >> PAGE_SHIFT;
+ args.src = kcalloc(nr, sizeof(unsigned long), GFP_KERNEL);
+ args.dst = kcalloc(nr, sizeof(unsigned long), GFP_KERNEL);
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+ args.fault_page = vmf->page;
+
+ if (!args.src || !args.dst) {
+ ret = VM_FAULT_OOM;
+ goto err;
+ }
+
+ if (order)
+ args.flags |= MIGRATE_VMA_SELECT_COMPOUND;
if (migrate_vma_setup(&args))
return VM_FAULT_SIGBUS;
- ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
if (ret)
- return ret;
+ goto err;
migrate_vma_pages(&args);
- dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+ /*
+ * No device finalize step is needed since
+ * dmirror_devmem_fault_alloc_and_copy() will have already
+ * invalidated the device page table.
+ */
migrate_vma_finalize(&args);
- return 0;
+err:
+ kfree(args.src);
+ kfree(args.dst);
+ return ret;
+}
+
+static void dmirror_devmem_folio_split(struct folio *head, struct folio *tail)
+{
+ struct page *rpage = BACKING_PAGE(folio_page(head, 0));
+ struct page *rpage_tail;
+ struct folio *rfolio;
+ unsigned long offset = 0;
+
+ if (!rpage) {
+ tail->page.zone_device_data = NULL;
+ return;
+ }
+
+ rfolio = page_folio(rpage);
+
+ if (tail == NULL) {
+ folio_reset_order(rfolio);
+ rfolio->mapping = NULL;
+ folio_set_count(rfolio, 1);
+ return;
+ }
+
+ offset = folio_pfn(tail) - folio_pfn(head);
+
+ rpage_tail = folio_page(rfolio, offset);
+ tail->page.zone_device_data = rpage_tail;
+ rpage_tail->zone_device_data = rpage->zone_device_data;
+ clear_compound_head(rpage_tail);
+ rpage_tail->mapping = NULL;
+
+ folio_page(tail, 0)->mapping = folio_page(head, 0)->mapping;
+ tail->pgmap = head->pgmap;
+ folio_set_count(page_folio(rpage_tail), 1);
}
static const struct dev_pagemap_ops dmirror_devmem_ops = {
- .page_free = dmirror_devmem_free,
+ .folio_free = dmirror_devmem_free,
.migrate_to_ram = dmirror_devmem_fault,
+ .folio_split = dmirror_devmem_folio_split,
};
static int dmirror_device_init(struct dmirror_device *mdevice, int id)
@@ -1080,63 +1735,60 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
cdev_init(&mdevice->cdevice, &dmirror_fops);
mdevice->cdevice.owner = THIS_MODULE;
- ret = cdev_add(&mdevice->cdevice, dev, 1);
+ device_initialize(&mdevice->device);
+ mdevice->device.devt = dev;
+
+ ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id);
if (ret)
- return ret;
+ goto put_device;
+
+ ret = cdev_device_add(&mdevice->cdevice, &mdevice->device);
+ if (ret)
+ goto put_device;
- /* Build a list of free ZONE_DEVICE private struct pages */
- dmirror_allocate_chunk(mdevice, NULL);
+ /* Build a list of free ZONE_DEVICE struct pages */
+ return dmirror_allocate_chunk(mdevice, NULL, false);
- return 0;
+put_device:
+ put_device(&mdevice->device);
+ return ret;
}
static void dmirror_device_remove(struct dmirror_device *mdevice)
{
- unsigned int i;
-
- if (mdevice->devmem_chunks) {
- for (i = 0; i < mdevice->devmem_count; i++) {
- struct dmirror_chunk *devmem =
- mdevice->devmem_chunks[i];
-
- memunmap_pages(&devmem->pagemap);
- release_mem_region(devmem->pagemap.res.start,
- resource_size(&devmem->pagemap.res));
- kfree(devmem);
- }
- kfree(mdevice->devmem_chunks);
- }
-
- cdev_del(&mdevice->cdevice);
+ dmirror_device_remove_chunks(mdevice);
+ cdev_device_del(&mdevice->cdevice, &mdevice->device);
+ put_device(&mdevice->device);
}
static int __init hmm_dmirror_init(void)
{
int ret;
- int id;
+ int id = 0;
+ int ndevices = 0;
ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
"HMM_DMIRROR");
if (ret)
goto err_unreg;
- for (id = 0; id < DMIRROR_NDEVICES; id++) {
+ memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ if (spm_addr_dev0 && spm_addr_dev1) {
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ }
+ for (id = 0; id < ndevices; id++) {
ret = dmirror_device_init(dmirror_devices + id, id);
if (ret)
goto err_chrdev;
}
- /*
- * Allocate a zero page to simulate a reserved page of device private
- * memory which is always zero. The zero_pfn page isn't used just to
- * make the code here simpler (i.e., we need a struct page for it).
- */
- dmirror_zero_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
- if (!dmirror_zero_page) {
- ret = -ENOMEM;
- goto err_chrdev;
- }
-
pr_info("HMM test module loaded. This is only for testing HMM.\n");
return 0;
@@ -1152,13 +1804,13 @@ static void __exit hmm_dmirror_exit(void)
{
int id;
- if (dmirror_zero_page)
- __free_page(dmirror_zero_page);
for (id = 0; id < DMIRROR_NDEVICES; id++)
- dmirror_device_remove(dmirror_devices + id);
+ if (dmirror_devices[id].zone_device_type)
+ dmirror_device_remove(dmirror_devices + id);
unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
}
module_init(hmm_dmirror_init);
module_exit(hmm_dmirror_exit);
+MODULE_DESCRIPTION("HMM (Heterogeneous Memory Management) test module");
MODULE_LICENSE("GPL");
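
For context (not part of the patch): the ioctls reworked above are driven from userspace through the character devices this module registers ("hmm_dmirror%u"). Below is a minimal, hedged sketch of how a test program might exercise the migrate-to-device path; it assumes the HMM_DMIRROR_MIGRATE_TO_DEV request and the addr/ptr/npages/cpages fields of struct hmm_dmirror_cmd from lib/test_hmm_uapi.h, and a /dev/hmm_dmirror0 node for the device-private instance. The in-tree hmm-tests selftest is the authoritative user of this interface.

/*
 * Hedged sketch only: field names and the HMM_DMIRROR_MIGRATE_TO_DEV
 * request are taken from test_hmm_uapi.h as used in the patch above;
 * the /dev/hmm_dmirror0 node name is an assumption based on the
 * "hmm_dmirror%u" name set in dmirror_device_init().
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "test_hmm_uapi.h"	/* struct hmm_dmirror_cmd, HMM_DMIRROR_* (path is an assumption) */

int main(void)
{
	size_t npages = 4;
	size_t size = npages * sysconf(_SC_PAGESIZE);
	struct hmm_dmirror_cmd cmd;
	unsigned char *buf;
	void *mem;
	int fd;

	fd = open("/dev/hmm_dmirror0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Anonymous memory for the fake device to migrate. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(mem, 0xa5, size);

	/* Bounce buffer the driver copies the migrated data back into. */
	buf = malloc(size);
	if (!buf)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr = (unsigned long)mem;
	cmd.ptr = (unsigned long)buf;
	cmd.npages = npages;

	/* Migrate the range into the device's ZONE_DEVICE memory. */
	if (ioctl(fd, HMM_DMIRROR_MIGRATE_TO_DEV, &cmd) < 0)
		perror("HMM_DMIRROR_MIGRATE_TO_DEV");
	else
		printf("migrated %llu pages\n",
		       (unsigned long long)cmd.cpages);

	free(buf);
	munmap(mem, size);
	close(fd);
	return 0;
}

A corresponding HMM_DMIRROR_MIGRATE_TO_SYS call on the same range would move the pages back to system memory, which is how the split of the old HMM_DMIRROR_MIGRATE ioctl into the two directions above is expected to be exercised.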