Diffstat (limited to 'drivers/misc/cxl')
-rw-r--r--  drivers/misc/cxl/context.c  96
-rw-r--r--  drivers/misc/cxl/cxl.h       9
-rw-r--r--  drivers/misc/cxl/file.c     20
-rw-r--r--  drivers/misc/cxl/native.c   12
-rw-r--r--  drivers/misc/cxl/pci.c       2
-rw-r--r--  drivers/misc/cxl/sysfs.c    10
6 files changed, 106 insertions(+), 43 deletions(-)
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index cca472109135..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+ struct address_space *mapping)
{
int i;
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
ctx->afu = afu;
ctx->master = master;
ctx->pid = NULL; /* Set in start work ioctl */
+ mutex_init(&ctx->mapping_lock);
+ ctx->mapping = mapping;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
* Allocating IDR! We better make sure everything's setup that
* dereferences from it.
*/
+ mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- spin_lock(&afu->contexts_lock);
i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
ctx->afu->num_procs, GFP_NOWAIT);
- spin_unlock(&afu->contexts_lock);
idr_preload_end();
+ mutex_unlock(&afu->contexts_lock);
if (i < 0)
return i;
@@ -97,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -105,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -168,21 +210,33 @@ void cxl_context_detach_all(struct cxl_afu *afu)
struct cxl_context *ctx;
int tmp;
- rcu_read_lock();
- idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
+ mutex_lock(&afu->contexts_lock);
+ idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
/*
* Anything done in here needs to be setup before the IDR is
* created and torn down after the IDR removed
*/
__detach_context(ctx);
- rcu_read_unlock();
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * Easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
+ }
+ mutex_unlock(&afu->contexts_lock);
}
void cxl_context_free(struct cxl_context *ctx)
{
- spin_lock(&ctx->afu->contexts_lock);
+ mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
- spin_unlock(&ctx->afu->contexts_lock);
+ mutex_unlock(&ctx->afu->contexts_lock);
synchronize_rcu();
free_page((u64)ctx->sstp);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index b5b6bda44a00..28078f8894a5 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -351,7 +351,7 @@ struct cxl_afu {
struct device *chardev_s, *chardev_m, *chardev_d;
struct idr contexts_idr;
struct dentry *debugfs;
- spinlock_t contexts_lock;
+ struct mutex contexts_lock;
struct mutex spa_mutex;
spinlock_t afu_cntl_lock;
@@ -398,6 +398,10 @@ struct cxl_context {
phys_addr_t psn_phys;
u64 psn_size;
+ /* Used to unmap any mmaps when force detaching */
+ struct address_space *mapping;
+ struct mutex mapping_lock;
+
spinlock_t sste_lock; /* Protects segment table entries */
struct cxl_sste *sstp;
u64 sstp0, sstp1;
@@ -599,7 +603,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 378b099e7c0b..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master)))
+ if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
goto err_put_afu;
pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
__func__, ctx->pe);
cxl_context_detach(ctx);
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = NULL;
+ mutex_unlock(&ctx->mapping_lock);
+
put_device(&ctx->afu->dev);
/*
@@ -136,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 9a5a442269a8..f2b37b41a0da 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
u64 cmd, u64 pe_state)
{
u64 state;
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
WARN_ON(!ctx->afu->enabled);
@@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
smp_mb();
cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
while (1) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
+ return -EBUSY;
+ }
state = be64_to_cpup(ctx->afu->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
@@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
return 0;
}
-/*
- * TODO: handle case when this is called inside a rcu_read_lock() which may
- * happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
- * & remove use a mutex lock and schedule which will not good with lock held.
- * May need to write do_process_element_cmd() that handles outstanding page
- * faults synchronously.
- */
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 10c98ab7f46e..0f2cc9f8b4db 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
afu->dev.release = cxl_release_afu;
afu->slice = slice;
idr_init(&afu->contexts_idr);
- spin_lock_init(&afu->contexts_lock);
+ mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock);
mutex_init(&afu->spa_mutex);
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index ce7ec06d87d1..461bdbd5d483 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
int rc;
/* Not safe to reset if it is currently in use */
- spin_lock(&afu->contexts_lock);
+ mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr)) {
rc = -EBUSY;
goto err;
@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
rc = count;
err:
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
return rc;
}
@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
int rc = -EBUSY;
/* can't change this if we have a user */
- spin_lock(&afu->contexts_lock);
+ mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr))
goto err;
@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
afu->current_mode = 0;
afu->num_procs = 0;
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
return rc;
@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
return count;
err:
- spin_unlock(&afu->contexts_lock);
+ mutex_unlock(&afu->contexts_lock);
return rc;
}