Diffstat (limited to 'drivers/s390')
-rw-r--r--   drivers/s390/block/Kconfig              |  12
-rw-r--r--   drivers/s390/block/dasd.c               |  24
-rw-r--r--   drivers/s390/block/dcssblk.c            |  35
-rw-r--r--   drivers/s390/char/Makefile              |   1
-rw-r--r--   drivers/s390/char/hmcdrv_dev.c          |  19
-rw-r--r--   drivers/s390/char/sclp_cmd.c            | 478
-rw-r--r--   drivers/s390/char/sclp_mem.c            | 399
-rw-r--r--   drivers/s390/char/tape_3590.c           |   2
-rw-r--r--   drivers/s390/crypto/zcrypt_ep11misc.c   |   4
-rw-r--r--   drivers/s390/net/Kconfig                |   3
-rw-r--r--   drivers/s390/net/ism.h                  |  53
-rw-r--r--   drivers/s390/net/ism_drv.c              | 573
12 files changed, 756 insertions(+), 847 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8c1c908d2c6e..877a9bc7f04b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,19 +5,11 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
prompt "DCSSBLK support"
- depends on S390 && BLOCK && (DAX || DAX=n)
+ depends on S390 && BLOCK && ZONE_DEVICE
+ select FS_DAX
help
Support for dcss block device
-config DCSSBLK_DAX
- def_bool y
- depends on DCSSBLK
- # requires S390 ZONE_DEVICE support
- depends on BROKEN
- prompt "DCSSBLK DAX support"
- help
- Enable DAX operation for the dcss block device
-
config DASD
def_tristate y
prompt "Support for DASD devices"
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 506a947d00a5..7765e40f7cea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -334,6 +334,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
lim.max_dev_sectors = device->discipline->max_sectors(block);
lim.max_hw_sectors = lim.max_dev_sectors;
lim.logical_block_size = block->bp_block;
+	/*
+	 * Set dma_alignment to logical_block_size - 1 to ensure proper
+	 * buffer alignment checks in the block layer.
+	 */
+ lim.dma_alignment = lim.logical_block_size - 1;
if (device->discipline->has_discard) {
unsigned int max_bytes;
@@ -3114,12 +3119,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
PTR_ERR(cqr) == -ENOMEM ||
PTR_ERR(cqr) == -EAGAIN) {
rc = BLK_STS_RESOURCE;
- goto out;
+ } else if (PTR_ERR(cqr) == -EINVAL) {
+ rc = BLK_STS_INVAL;
+ } else {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "CCW creation failed (rc=%ld) on request %p",
+ PTR_ERR(cqr), req);
+ rc = BLK_STS_IOERR;
}
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "CCW creation failed (rc=%ld) on request %p",
- PTR_ERR(cqr), req);
- rc = BLK_STS_IOERR;
goto out;
}
/*
@@ -3317,11 +3324,11 @@ static void dasd_release(struct gendisk *disk)
/*
* Return disk geometry.
*/
-static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
struct dasd_device *base;
- base = dasd_device_from_gendisk(bdev->bd_disk);
+ base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
@@ -3331,7 +3338,8 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -EINVAL;
}
base->discipline->fill_geometry(base->block, geo);
- geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	/* geo->start is left unchanged by the above */
+ geo->start >>= base->block->s2b_shift;
dasd_put_device(base);
return 0;
}
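
The new dma_alignment limit lets the block layer apply its buffer
alignment checks before requests reach the driver. A minimal sketch of
the rule it enforces; dasd_buf_aligned() is a hypothetical helper for
illustration, not part of the patch:

	/* With dma_alignment = logical_block_size - 1, a buffer address
	 * passes only if it is a multiple of the logical block size.
	 */
	static bool dasd_buf_aligned(uintptr_t buf, u32 logical_block_size)
	{
		u32 dma_alignment = logical_block_size - 1; /* e.g. 0xfff for 4k */

		return (buf & dma_alignment) == 0;
	}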
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 94fa5edecadd..86fef4b15015 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -79,6 +79,8 @@ struct dcssblk_dev_info {
int num_of_segments;
struct list_head seg_list;
struct dax_device *dax_dev;
+ struct dev_pagemap pgmap;
+ void *pgmap_addr;
};
struct segment_info {
@@ -415,6 +417,8 @@ removeseg:
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
@@ -537,9 +541,6 @@ static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
{
struct dax_device *dax_dev;
- if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
- return 0;
-
dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dax_dev))
return PTR_ERR(dax_dev);
@@ -562,6 +563,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
char *local_buf;
+ void *addr;
unsigned long seg_byte_size;
dev_info = NULL;
@@ -687,9 +689,26 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- rc = dcssblk_setup_dax(dev_info);
- if (rc)
- goto out_dax;
+ if (!IS_ALIGNED(dev_info->start, SUBSECTION_SIZE) ||
+ !IS_ALIGNED(dev_info->end + 1, SUBSECTION_SIZE)) {
+ pr_info("DCSS %s is not aligned to %lu bytes, DAX support disabled\n",
+ local_buf, SUBSECTION_SIZE);
+ } else {
+ dev_info->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ dev_info->pgmap.range.start = dev_info->start;
+ dev_info->pgmap.range.end = dev_info->end;
+ dev_info->pgmap.nr_range = 1;
+ addr = devm_memremap_pages(&dev_info->dev, &dev_info->pgmap);
+ if (IS_ERR(addr)) {
+ rc = PTR_ERR(addr);
+ goto put_dev;
+ }
+ dev_info->pgmap_addr = addr;
+ rc = dcssblk_setup_dax(dev_info);
+ if (rc)
+ goto out_dax;
+ pr_info("DAX support enabled for DCSS %s\n", local_buf);
+ }
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
@@ -716,6 +735,8 @@ out_dax_host:
out_dax:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
put_dev:
list_del(&dev_info->lh);
put_disk(dev_info->gd);
@@ -801,6 +822,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
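
The pgmap setup added to dcssblk_add_store() follows the standard
devm_memremap_pages() pattern for FS_DAX. A condensed sketch, with
placeholder names (dev, seg_start, seg_end) and error paths trimmed:

	struct dev_pagemap pgmap = {
		.type     = MEMORY_DEVICE_FS_DAX,
		.range    = {
			.start = seg_start, /* must be SUBSECTION_SIZE aligned */
			.end   = seg_end,   /* inclusive end of the DCSS */
		},
		.nr_range = 1,
	};
	void *addr = devm_memremap_pages(dev, &pgmap); /* creates struct pages */

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	/* the removal paths above undo this with devm_memunmap_pages(dev, &pgmap) */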
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 81d6744e1861..dcbd51152ee3 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -21,6 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o sclp_sd.o
+obj-$(CONFIG_MEMORY_HOTPLUG) += sclp_mem.o
obj-$(CONFIG_TN3270) += raw3270.o con3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index e069dd685899..b26fcf6849f2 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -244,24 +244,17 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
+ void *pdata;
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
fp, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
- fp->private_data = kmalloc(len + 1, GFP_KERNEL);
-
- if (!fp->private_data)
- return -ENOMEM;
-
- if (!copy_from_user(fp->private_data, ubuf, len)) {
- ((char *)fp->private_data)[len] = '\0';
- return len;
- }
-
- kfree(fp->private_data);
- fp->private_data = NULL;
- return -EFAULT;
+ pdata = memdup_user_nul(ubuf, len);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ fp->private_data = pdata;
+ return len;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
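
memdup_user_nul() folds the deleted allocate/copy/terminate sequence
into one call; its behaviour is roughly:

	/* rough equivalent of pdata = memdup_user_nul(ubuf, len) */
	pdata = kmalloc(len + 1, GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(pdata, ubuf, len)) {
		kfree(pdata);
		return ERR_PTR(-EFAULT);
	}
	((char *)pdata)[len] = '\0';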
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 16469678548f..3480198eac02 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -8,31 +8,46 @@
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/cpufeature.h>
#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/memory.h>
-#include <linux/memory_hotplug.h>
-#include <linux/module.h>
-#include <asm/ctlreg.h>
#include <asm/chpid.h>
-#include <asm/setup.h>
-#include <asm/page.h>
+#include <asm/ctlreg.h>
#include <asm/sclp.h>
-#include <asm/numa.h>
-#include <asm/facility.h>
-#include <asm/page-states.h>
#include "sclp.h"
-#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
-#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+/* CPU configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
+/* Channel path configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
+
+struct cpu_configure_sccb {
+ struct sccb_header header;
+} __packed __aligned(8);
+
+struct chp_cfg_sccb {
+ struct sccb_header header;
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
+
+struct chp_info_sccb {
+ struct sccb_header header;
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
@@ -64,13 +79,11 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
request->callback_data = &completion;
init_completion(&completion);
- /* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
- /* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
cmd, request->status);
@@ -81,22 +94,15 @@ out:
return rc;
}
-/*
- * CPU configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-
int _sclp_get_core_info(struct sclp_core_info *info)
{
- int rc;
- int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
struct read_cpu_info_sccb *sccb;
+ int rc, length;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
+ length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
if (!sccb)
return -ENOMEM;
@@ -114,14 +120,10 @@ int _sclp_get_core_info(struct sclp_core_info *info)
}
sclp_fill_core_info(info, sccb);
out:
- free_pages((unsigned long) sccb, get_order(length));
+ free_pages((unsigned long)sccb, get_order(length));
return rc;
}
-struct cpu_configure_sccb {
- struct sccb_header header;
-} __attribute__((packed, aligned(8)));
-
static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
@@ -130,8 +132,8 @@ static int do_core_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
- * This is not going to cross a page boundary since we force
- * kmalloc to have a minimum alignment of 8 bytes on s390.
+ * Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb
+ * is not going to cross a page boundary.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
@@ -165,394 +167,6 @@ int sclp_core_deconfigure(u8 core)
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-static DEFINE_MUTEX(sclp_mem_mutex);
-static LIST_HEAD(sclp_mem_list);
-static u8 sclp_max_storage_id;
-static DECLARE_BITMAP(sclp_storage_ids, 256);
-
-struct memory_increment {
- struct list_head list;
- u16 rn;
- int standby;
-};
-
-struct assign_storage_sccb {
- struct sccb_header header;
- u16 rn;
-} __packed;
-
-int arch_get_memory_phys_device(unsigned long start_pfn)
-{
- if (!sclp.rzm)
- return 0;
- return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
-}
-
-static unsigned long long rn2addr(u16 rn)
-{
- return (unsigned long long) (rn - 1) * sclp.rzm;
-}
-
-static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
-{
- struct assign_storage_sccb *sccb;
- int rc;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->rn = rn;
- rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- case 0x0120:
- break;
- default:
- pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
- cmd, sccb->header.response_code, rn);
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_assign_storage(u16 rn)
-{
- unsigned long long start;
- int rc;
-
- rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
- if (rc)
- return rc;
- start = rn2addr(rn);
- storage_key_init_range(start, start + sclp.rzm);
- return 0;
-}
-
-static int sclp_unassign_storage(u16 rn)
-{
- return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
-}
-
-struct attach_storage_sccb {
- struct sccb_header header;
- u16 :16;
- u16 assigned;
- u32 :32;
- u32 entries[];
-} __packed;
-
-static int sclp_attach_storage(u8 id)
-{
- struct attach_storage_sccb *sccb;
- int rc;
- int i;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->header.function_code = 0x40;
- rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
- SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (sccb->entries[i])
- sclp_unassign_storage(sccb->entries[i] >> 16);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_mem_change_state(unsigned long start, unsigned long size,
- int online)
-{
- struct memory_increment *incr;
- unsigned long long istart;
- int rc = 0;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (start + size - 1 < istart)
- break;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (online)
- rc |= sclp_assign_storage(incr->rn);
- else
- sclp_unassign_storage(incr->rn);
- if (rc == 0)
- incr->standby = online ? 0 : 1;
- }
- return rc ? -EIO : 0;
-}
-
-static bool contains_standby_increment(unsigned long start, unsigned long end)
-{
- struct memory_increment *incr;
- unsigned long istart;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (end - 1 < istart)
- continue;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (incr->standby)
- return true;
- }
- return false;
-}
-
-static int sclp_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- unsigned long start, size;
- struct memory_notify *arg;
- unsigned char id;
- int rc = 0;
-
- arg = data;
- start = arg->start_pfn << PAGE_SHIFT;
- size = arg->nr_pages << PAGE_SHIFT;
- mutex_lock(&sclp_mem_mutex);
- for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
- sclp_attach_storage(id);
- switch (action) {
- case MEM_GOING_OFFLINE:
- /*
- * We do not allow to set memory blocks offline that contain
- * standby memory. This is done to simplify the "memory online"
- * case.
- */
- if (contains_standby_increment(start, start + size))
- rc = -EPERM;
- break;
- case MEM_PREPARE_ONLINE:
- /*
- * Access the altmap_start_pfn and altmap_nr_pages fields
- * within the struct memory_notify specifically when dealing
- * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
- *
- * When altmap is in use, take the specified memory range
- * online, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- rc = sclp_mem_change_state(start, size, 1);
- if (rc || !arg->altmap_nr_pages)
- break;
- /*
- * Set CMMA state to nodat here, since the struct page memory
- * at the beginning of the memory block will not go through the
- * buddy allocator later.
- */
- __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
- break;
- case MEM_FINISH_OFFLINE:
- /*
- * When altmap is in use, take the specified memory range
- * offline, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- sclp_mem_change_state(start, size, 0);
- break;
- default:
- break;
- }
- mutex_unlock(&sclp_mem_mutex);
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static struct notifier_block sclp_mem_nb = {
- .notifier_call = sclp_mem_notifier,
-};
-
-static void __init align_to_block_size(unsigned long long *start,
- unsigned long long *size,
- unsigned long long alignment)
-{
- unsigned long long start_align, size_align;
-
- start_align = roundup(*start, alignment);
- size_align = rounddown(*start + *size, alignment) - start_align;
-
- pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
- *start, size_align >> 20, *size >> 20);
- *start = start_align;
- *size = size_align;
-}
-
-static void __init add_memory_merged(u16 rn)
-{
- unsigned long long start, size, addr, block_size;
- static u16 first_rn, num;
-
- if (rn && first_rn && (first_rn + num == rn)) {
- num++;
- return;
- }
- if (!first_rn)
- goto skip_add;
- start = rn2addr(first_rn);
- size = (unsigned long long) num * sclp.rzm;
- if (start >= ident_map_size)
- goto skip_add;
- if (start + size > ident_map_size)
- size = ident_map_size - start;
- block_size = memory_block_size_bytes();
- align_to_block_size(&start, &size, block_size);
- if (!size)
- goto skip_add;
- for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size,
- cpu_has_edat1() ?
- MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
-skip_add:
- first_rn = rn;
- num = 1;
-}
-
-static void __init sclp_add_standby_memory(void)
-{
- struct memory_increment *incr;
-
- list_for_each_entry(incr, &sclp_mem_list, list)
- if (incr->standby)
- add_memory_merged(incr->rn);
- add_memory_merged(0);
-}
-
-static void __init insert_increment(u16 rn, int standby, int assigned)
-{
- struct memory_increment *incr, *new_incr;
- struct list_head *prev;
- u16 last_rn;
-
- new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
- if (!new_incr)
- return;
- new_incr->rn = rn;
- new_incr->standby = standby;
- last_rn = 0;
- prev = &sclp_mem_list;
- list_for_each_entry(incr, &sclp_mem_list, list) {
- if (assigned && incr->rn > rn)
- break;
- if (!assigned && incr->rn - last_rn > 1)
- break;
- last_rn = incr->rn;
- prev = &incr->list;
- }
- if (!assigned)
- new_incr->rn = last_rn + 1;
- if (new_incr->rn > sclp.rnmax) {
- kfree(new_incr);
- return;
- }
- list_add(&new_incr->list, prev);
-}
-
-static int __init sclp_detect_standby_memory(void)
-{
- struct read_storage_sccb *sccb;
- int i, id, assigned, rc;
-
- if (oldmem_data.start) /* No standby memory in kdump mode */
- return 0;
- if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
- return 0;
- rc = -ENOMEM;
- sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- goto out;
- assigned = 0;
- for (id = 0; id <= sclp_max_storage_id; id++) {
- memset(sccb, 0, PAGE_SIZE);
- sccb->header.length = PAGE_SIZE;
- rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0010:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 0, 1);
- }
- break;
- case 0x0310:
- break;
- case 0x0410:
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 1, 1);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
- if (!rc)
- sclp_max_storage_id = sccb->max_id;
- }
- if (rc || list_empty(&sclp_mem_list))
- goto out;
- for (i = 1; i <= sclp.rnmax - assigned; i++)
- insert_increment(0, 1, 0);
- rc = register_memory_notifier(&sclp_mem_nb);
- if (rc)
- goto out;
- sclp_add_standby_memory();
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-__initcall(sclp_detect_standby_memory);
-
-#endif /* CONFIG_MEMORY_HOTPLUG */
-
-/*
- * Channel path configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
-#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
-#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
-
-struct chp_cfg_sccb {
- struct sccb_header header;
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
@@ -560,8 +174,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -581,7 +194,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
@@ -609,16 +222,6 @@ int sclp_chp_deconfigure(struct chp_id chpid)
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
-struct chp_info_sccb {
- struct sccb_header header;
- u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
- u8 standby[SCLP_CHP_INFO_MASK_SIZE];
- u8 configured[SCLP_CHP_INFO_MASK_SIZE];
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
@@ -634,8 +237,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -652,6 +254,6 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
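
The command words gathered at the top of the file share one encoding:
the base constant selects the SCLP function, and callers OR the id of
the affected object (core, channel path or storage id) into bits 8-15,
as do_core_configure() and do_chp_configure() show. A worked example:

	sclp_cmdw_t cmd;

	/* deconfigure CPU core 0x05 */
	cmd = SCLP_CMDW_DECONFIGURE_CPU | 0x05 << 8;
	/* = 0x00100001 | 0x00000500 = 0x00100501 */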
diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c
new file mode 100644
index 000000000000..27f49f5fd358
--- /dev/null
+++ b/drivers/s390/char/sclp_mem.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory hotplug support via sclp
+ *
+ * Copyright IBM Corp. 2025
+ */
+
+#define KMSG_COMPONENT "sclp_mem"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+#include <asm/page.h>
+#include <asm/page-states.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
+#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static DECLARE_BITMAP(sclp_storage_ids, 256);
+
+struct memory_increment {
+ struct list_head list;
+ u16 rn;
+ int standby;
+};
+
+struct assign_storage_sccb {
+ struct sccb_header header;
+ u16 rn;
+} __packed;
+
+struct attach_storage_sccb {
+ struct sccb_header header;
+ u16 :16;
+ u16 assigned;
+ u32 :32;
+ u32 entries[];
+} __packed;
+
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+ if (!sclp.rzm)
+ return 0;
+ return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
+}
+
+static unsigned long rn2addr(u16 rn)
+{
+ return (unsigned long)(rn - 1) * sclp.rzm;
+}
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+ struct assign_storage_sccb *sccb;
+ int rc;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->rn = rn;
+ rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+ cmd, sccb->header.response_code, rn);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+ unsigned long start;
+ int rc;
+
+ rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
+ if (rc)
+ return rc;
+ start = rn2addr(rn);
+ storage_key_init_range(start, start + sclp.rzm);
+ return 0;
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+ return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
+}
+
+static int sclp_attach_storage(u8 id)
+{
+ struct attach_storage_sccb *sccb;
+ int rc, i;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->header.function_code = 0x40;
+ rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+ SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (sccb->entries[i])
+ sclp_unassign_storage(sccb->entries[i] >> 16);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+ int online)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+ int rc = 0;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (start + size - 1 < istart)
+ break;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (online)
+ rc |= sclp_assign_storage(incr->rn);
+ else
+ sclp_unassign_storage(incr->rn);
+ if (rc == 0)
+ incr->standby = online ? 0 : 1;
+ }
+ return rc ? -EIO : 0;
+}
+
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (end - 1 < istart)
+ continue;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (incr->standby)
+ return true;
+ }
+ return false;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned long start, size;
+ struct memory_notify *arg;
+ unsigned char id;
+ int rc = 0;
+
+ arg = data;
+ start = arg->start_pfn << PAGE_SHIFT;
+ size = arg->nr_pages << PAGE_SHIFT;
+ mutex_lock(&sclp_mem_mutex);
+ for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+ sclp_attach_storage(id);
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+		 * Do not allow setting memory blocks offline that contain
+ * standby memory. This is done to simplify the "memory online"
+ * case.
+ */
+ if (contains_standby_increment(start, start + size))
+ rc = -EPERM;
+ break;
+ case MEM_PREPARE_ONLINE:
+ /*
+		 * The altmap_start_pfn and altmap_nr_pages fields in
+		 * struct memory_notify are only valid for the
+		 * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
+ *
+ * When altmap is in use, take the specified memory range
+ * online, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ rc = sclp_mem_change_state(start, size, 1);
+ if (rc || !arg->altmap_nr_pages)
+ break;
+ /*
+ * Set CMMA state to nodat here, since the struct page memory
+ * at the beginning of the memory block will not go through the
+ * buddy allocator later.
+ */
+ __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
+ break;
+ case MEM_FINISH_OFFLINE:
+ /*
+ * When altmap is in use, take the specified memory range
+ * offline, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ sclp_mem_change_state(start, size, 0);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&sclp_mem_mutex);
+ return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+ .notifier_call = sclp_mem_notifier,
+};
+
+static void __init align_to_block_size(unsigned long *start,
+ unsigned long *size,
+ unsigned long alignment)
+{
+ unsigned long start_align, size_align;
+
+ start_align = roundup(*start, alignment);
+ size_align = rounddown(*start + *size, alignment) - start_align;
+
+ pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
+ *start, size_align >> 20, *size >> 20);
+ *start = start_align;
+ *size = size_align;
+}
+
+static void __init add_memory_merged(u16 rn)
+{
+ unsigned long start, size, addr, block_size;
+ static u16 first_rn, num;
+
+ if (rn && first_rn && (first_rn + num == rn)) {
+ num++;
+ return;
+ }
+ if (!first_rn)
+ goto skip_add;
+ start = rn2addr(first_rn);
+ size = (unsigned long)num * sclp.rzm;
+ if (start >= ident_map_size)
+ goto skip_add;
+ if (start + size > ident_map_size)
+ size = ident_map_size - start;
+ block_size = memory_block_size_bytes();
+ align_to_block_size(&start, &size, block_size);
+ if (!size)
+ goto skip_add;
+ for (addr = start; addr < start + size; addr += block_size) {
+ add_memory(0, addr, block_size,
+ cpu_has_edat1() ?
+ MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
+ }
+skip_add:
+ first_rn = rn;
+ num = 1;
+}
+
+static void __init sclp_add_standby_memory(void)
+{
+ struct memory_increment *incr;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (incr->standby)
+ add_memory_merged(incr->rn);
+ }
+ add_memory_merged(0);
+}
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+ struct memory_increment *incr, *new_incr;
+ struct list_head *prev;
+ u16 last_rn;
+
+ new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+ if (!new_incr)
+ return;
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (assigned && incr->rn > rn)
+ break;
+ if (!assigned && incr->rn - last_rn > 1)
+ break;
+ last_rn = incr->rn;
+ prev = &incr->list;
+ }
+ if (!assigned)
+ new_incr->rn = last_rn + 1;
+ if (new_incr->rn > sclp.rnmax) {
+ kfree(new_incr);
+ return;
+ }
+ list_add(&new_incr->list, prev);
+}
+
+static int __init sclp_detect_standby_memory(void)
+{
+ struct read_storage_sccb *sccb;
+ int i, id, assigned, rc;
+
+ /* No standby memory in kdump mode */
+ if (oldmem_data.start)
+ return 0;
+ if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
+ return 0;
+ rc = -ENOMEM;
+ sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ goto out;
+ assigned = 0;
+ for (id = 0; id <= sclp_max_storage_id; id++) {
+ memset(sccb, 0, PAGE_SIZE);
+ sccb->header.length = PAGE_SIZE;
+ rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0010:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 0, 1);
+ }
+ break;
+ case 0x0310:
+ break;
+ case 0x0410:
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 1, 1);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+ if (!rc)
+ sclp_max_storage_id = sccb->max_id;
+ }
+ if (rc || list_empty(&sclp_mem_list))
+ goto out;
+ for (i = 1; i <= sclp.rnmax - assigned; i++)
+ insert_increment(0, 1, 0);
+ rc = register_memory_notifier(&sclp_mem_nb);
+ if (rc)
+ goto out;
+ sclp_add_standby_memory();
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+__initcall(sclp_detect_standby_memory);
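
A note on the increment arithmetic above: increment numbers (rn) are
1-based, so rn2addr() subtracts one before scaling by the increment
size sclp.rzm. With an illustrative rzm of 256M (0x10000000):

	rn2addr(1);	/* = 0 * 256M = 0x00000000 */
	rn2addr(5);	/* = 4 * 256M = 0x40000000 */
	/* increment rn covers [rn2addr(rn), rn2addr(rn) + sclp.rzm - 1] */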
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a1bafaf73f87..2a2931d303cb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1671,7 +1671,7 @@ tape_3590_init(void)
DBF_EVENT(3, "3590 init\n");
- tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+ tape_3590_wq = alloc_workqueue("tape_3590", WQ_PERCPU, 0);
if (!tape_3590_wq)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 3bf09a89a089..e92e2fd8ce5d 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1405,7 +1405,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
- keybitsize, 0, keybuf, keybufsize, keytype, xflags);
+ keybitsize, keygenflags,
+ keybuf, keybufsize,
+ keytype, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
__func__, rc);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 2b43f6f28362..0fd700c5745a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -81,8 +81,7 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
- depends on PCI
- imply SMC
+ depends on PCI && DIBS
default n
help
Select this option if you want to use the Internal Shared Memory
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 047fa6101555..08d17956cb36 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -5,11 +5,13 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/ism.h>
-#include <net/smc.h>
+#include <linux/dibs.h>
#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
+#define ISM_ERROR 0xFFFF
+
+#define ISM_NR_DMBS 1920
/*
* Do not use the first word of the DMB bits to ensure 8 byte aligned access.
@@ -32,6 +34,23 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
+enum ism_event_type {
+ ISM_EVENT_BUF = 0x00,
+ ISM_EVENT_DEV = 0x01,
+ ISM_EVENT_SWR = 0x02
+};
+
+enum ism_event_code {
+ ISM_BUF_DMB_UNREGISTERED = 0x04,
+ ISM_BUF_USING_ISM_DEV_DISABLED = 0x08,
+ ISM_BUF_OWNING_ISM_DEV_IN_ERR_STATE = 0x02,
+ ISM_BUF_USING_ISM_DEV_IN_ERR_STATE = 0x03,
+ ISM_BUF_VLAN_MISMATCH_WITH_OWNER = 0x05,
+ ISM_BUF_VLAN_MISMATCH_WITH_USER = 0x06,
+ ISM_DEV_GID_DISABLED = 0x07,
+ ISM_DEV_GID_ERR_STATE = 0x01
+};
+
struct ism_req_hdr {
u32 cmd;
u16 : 16;
@@ -65,6 +84,15 @@ union ism_reg_ieq {
} response;
} __aligned(16);
+/* ISM-vPCI devices provide 64 Bit GIDs
+ * Map them to ISM UUID GIDs like this:
+ * _________________________________________
+ * | 64 Bit ISM-vPCI GID | 00000000_00000000 |
+ * -----------------------------------------
+ * This will be interpreted as a UUID variant that is reserved
+ * for NCS backward compatibility, so it will not collide with
+ * proper UUIDs.
+ */
union ism_read_gid {
struct {
struct ism_req_hdr hdr;
@@ -174,6 +202,14 @@ struct ism_eq_header {
u64 : 64;
};
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
struct ism_eq {
struct ism_eq_header header;
struct ism_event entry[15];
@@ -188,6 +224,19 @@ struct ism_sba {
u16 dmbe_mask[ISM_NR_DMBS];
};
+struct ism_dev {
+ spinlock_t cmd_lock; /* serializes cmds */
+ struct dibs_dev *dibs;
+ struct pci_dev *pdev;
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+ int ieq_idx;
+};
+
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
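
The GID mapping described in the comment block above comes down to the
memset/memcpy pair in ism_read_local_gid() below; in isolation, with an
illustrative GID value:

	uuid_t uuid;
	u64 vpci_gid = 0x123456789abcdef0ULL;	/* illustrative 64-bit GID */

	memset(&uuid, 0, sizeof(uuid));		/* last 8 bytes stay zero */
	memcpy(&uuid, &vpci_gid, sizeof(vpci_gid)); /* first 8 bytes = GID */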
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 6cd60b174315..f84aa2e676e9 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -31,101 +31,6 @@ MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
-#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
-static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
- /* a list for fast mapping */
-static u8 max_client;
-static DEFINE_MUTEX(clients_lock);
-static bool ism_v2_capable;
-struct ism_dev_list {
- struct list_head list;
- struct mutex mutex; /* protects ism device list */
-};
-
-static struct ism_dev_list ism_dev_list = {
- .list = LIST_HEAD_INIT(ism_dev_list.list),
- .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
-};
-
-static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ism->lock, flags);
- ism->subs[client->id] = client;
- spin_unlock_irqrestore(&ism->lock, flags);
-}
-
-int ism_register_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- int i, rc = -ENOSPC;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < MAX_CLIENTS; ++i) {
- if (!clients[i]) {
- clients[i] = client;
- client->id = i;
- if (i == max_client)
- max_client++;
- rc = 0;
- break;
- }
- }
- mutex_unlock(&clients_lock);
-
- if (i < MAX_CLIENTS) {
- /* initialize with all devices that we got so far */
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- ism->priv[i] = NULL;
- client->add(ism);
- ism_setup_forwarding(client, ism);
- }
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_register_client);
-
-int ism_unregister_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- unsigned long flags;
- int rc = 0;
-
- mutex_lock(&ism_dev_list.mutex);
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- spin_lock_irqsave(&ism->lock, flags);
- /* Stop forwarding IRQs and events */
- ism->subs[client->id] = NULL;
- for (int i = 0; i < ISM_NR_DMBS; ++i) {
- if (ism->sba_client_arr[i] == client->id) {
- WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
- __func__, client->name);
- rc = -EBUSY;
- goto err_reg_dmb;
- }
- }
- spin_unlock_irqrestore(&ism->lock, flags);
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- mutex_lock(&clients_lock);
- clients[client->id] = NULL;
- if (client->id + 1 == max_client)
- max_client--;
- mutex_unlock(&clients_lock);
- return rc;
-
-err_reg_dmb:
- spin_unlock_irqrestore(&ism->lock, flags);
- mutex_unlock(&ism_dev_list.mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_unregister_client);
-
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
struct ism_req_hdr *req = cmd;
@@ -273,8 +178,9 @@ static int unregister_ieq(struct ism_dev *ism)
return 0;
}
-static int ism_read_local_gid(struct ism_dev *ism)
+static int ism_read_local_gid(struct dibs_dev *dibs)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_read_gid cmd;
int ret;
@@ -286,20 +192,43 @@ static int ism_read_local_gid(struct ism_dev *ism)
if (ret)
goto out;
- ism->local_gid = cmd.response.gid;
+ memset(&dibs->gid, 0, sizeof(dibs->gid));
+ memcpy(&dibs->gid, &cmd.response.gid, sizeof(cmd.response.gid));
out:
return ret;
}
-static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
{
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_max_dmbs(void)
+{
+ return ISM_NR_DMBS;
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
+{
+ clear_bit(dmb->idx, ism->sba_bitmap);
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
DMA_FROM_DEVICE);
folio_put(virt_to_folio(dmb->cpu_addr));
}
-static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_alloc_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
{
struct folio *folio;
unsigned long bit;
@@ -308,16 +237,16 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
- if (!dmb->sba_idx) {
+ if (!dmb->idx) {
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
return -ENOSPC;
- dmb->sba_idx = bit;
+ dmb->idx = bit;
}
- if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
- test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ if (dmb->idx < ISM_DMB_BIT_OFFSET ||
+ test_and_set_bit(dmb->idx, ism->sba_bitmap))
return -EINVAL;
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
@@ -342,13 +271,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out_free:
kfree(dmb->cpu_addr);
out_bit:
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ clear_bit(dmb->idx, ism->sba_bitmap);
return rc;
}
-int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
- struct ism_client *client)
+static int ism_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_reg_dmb cmd;
unsigned long flags;
int ret;
@@ -363,10 +293,10 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
cmd.request.dmb = dmb->dma_addr;
cmd.request.dmb_len = dmb->dmb_len;
- cmd.request.sba_idx = dmb->sba_idx;
+ cmd.request.sba_idx = dmb->idx;
cmd.request.vlan_valid = dmb->vlan_valid;
cmd.request.vlan_id = dmb->vlan_id;
- cmd.request.rgid = dmb->rgid;
+ memcpy(&cmd.request.rgid, &dmb->rgid, sizeof(u64));
ret = ism_cmd(ism, &cmd);
if (ret) {
@@ -374,16 +304,16 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_register_dmb);
-int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_unreg_dmb cmd;
unsigned long flags;
int ret;
@@ -394,9 +324,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
@@ -406,10 +336,10 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_unregister_dmb);
-static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_add_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -421,8 +351,9 @@ static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_del_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -434,15 +365,35 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
+static int ism_signal_ieq(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
return min(boundary - (start & (boundary - 1)), len);
}
-int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
- unsigned int offset, void *data, unsigned int size)
+static int ism_move(struct dibs_dev *dibs, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size)
{
+ struct ism_dev *ism = dibs->drv_priv;
unsigned int bytes;
u64 dmb_req;
int ret;
@@ -463,24 +414,79 @@ int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
return 0;
}
-EXPORT_SYMBOL_GPL(ism_move);
+
+static u16 ism_get_chid(struct dibs_dev *dibs)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+
+ if (!ism || !ism->pdev)
+ return 0;
+
+ return to_zpci(ism->pdev)->pchid;
+}
+
+static int ism_match_event_type(u32 s390_event_type)
+{
+ switch (s390_event_type) {
+ case ISM_EVENT_BUF:
+ return DIBS_BUF_EVENT;
+ case ISM_EVENT_DEV:
+ return DIBS_DEV_EVENT;
+ case ISM_EVENT_SWR:
+ return DIBS_SW_EVENT;
+ default:
+ return DIBS_OTHER_TYPE;
+ }
+}
+
+static int ism_match_event_subtype(u32 s390_event_subtype)
+{
+ switch (s390_event_subtype) {
+ case ISM_BUF_DMB_UNREGISTERED:
+ return DIBS_BUF_UNREGISTERED;
+ case ISM_DEV_GID_DISABLED:
+ return DIBS_DEV_DISABLED;
+ case ISM_DEV_GID_ERR_STATE:
+ return DIBS_DEV_ERR_STATE;
+ default:
+ return DIBS_OTHER_SUBTYPE;
+ }
+}
static void ism_handle_event(struct ism_dev *ism)
{
+ struct dibs_dev *dibs = ism->dibs;
+ struct dibs_event event;
struct ism_event *entry;
- struct ism_client *clt;
+ struct dibs_client *clt;
int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
- if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+ if (++ism->ieq_idx == ARRAY_SIZE(ism->ieq->entry))
ism->ieq_idx = 0;
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
- for (i = 0; i < max_client; ++i) {
- clt = ism->subs[i];
+ __memset(&event, 0, sizeof(event));
+ event.type = ism_match_event_type(entry->type);
+ if (event.type == DIBS_SW_EVENT)
+ event.subtype = entry->code;
+ else
+ event.subtype = ism_match_event_subtype(entry->code);
+ event.time = entry->time;
+ event.data = entry->info;
+ switch (event.type) {
+ case DIBS_BUF_EVENT:
+ event.buffer_tok = entry->tok;
+ break;
+ case DIBS_DEV_EVENT:
+ case DIBS_SW_EVENT:
+ memcpy(&event.gid, &entry->tok, sizeof(u64));
+ }
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ clt = dibs->subs[i];
if (clt)
- clt->handle_event(ism, entry);
+ clt->ops->handle_event(dibs, &event);
}
}
}
@@ -489,14 +495,17 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
unsigned long bit, end;
+ struct dibs_dev *dibs;
unsigned long *bv;
u16 dmbemask;
u8 client_id;
+ dibs = ism->dibs;
+
bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
- spin_lock(&ism->lock);
+ spin_lock(&dibs->lock);
ism->sba->s = 0;
barrier();
for (bit = 0;;) {
@@ -508,10 +517,13 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- client_id = ism->sba_client_arr[bit];
- if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+ client_id = dibs->dmb_clientid_arr[bit];
+ if (unlikely(client_id == NO_DIBS_CLIENT ||
+ !dibs->subs[client_id]))
continue;
- ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+ dibs->subs[client_id]->ops->handle_irq(dibs,
+ bit + ISM_DMB_BIT_OFFSET,
+ dmbemask);
}
if (ism->sba->e) {
@@ -519,28 +531,35 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
barrier();
ism_handle_event(ism);
}
- spin_unlock(&ism->lock);
+ spin_unlock(&dibs->lock);
return IRQ_HANDLED;
}
+static const struct dibs_dev_ops ism_ops = {
+ .get_fabric_id = ism_get_chid,
+ .query_remote_gid = ism_query_rgid,
+ .max_dmbs = ism_max_dmbs,
+ .register_dmb = ism_register_dmb,
+ .unregister_dmb = ism_unregister_dmb,
+ .move_data = ism_move,
+ .add_vlan_id = ism_add_vlan_id,
+ .del_vlan_id = ism_del_vlan_id,
+ .signal_event = ism_signal_ieq,
+};
+
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- int i, ret;
+ int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
- ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
- if (!ism->sba_client_arr)
- goto free_vectors;
- memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
-
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
- goto free_client_arr;
+ goto free_vectors;
ret = register_sba(ism);
if (ret)
@@ -550,57 +569,33 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_sba;
- ret = ism_read_local_gid(ism);
- if (ret)
- goto unreg_ieq;
-
- if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
- /* hardware is V2 capable */
- ism_v2_capable = true;
- else
- ism_v2_capable = false;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i]) {
- clients[i]->add(ism);
- ism_setup_forwarding(clients[i], ism);
- }
- }
- mutex_unlock(&clients_lock);
-
- list_add(&ism->list, &ism_dev_list.list);
- mutex_unlock(&ism_dev_list.mutex);
-
query_info(ism);
return 0;
-unreg_ieq:
- unregister_ieq(ism);
unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
-free_client_arr:
- kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
return ret;
}
-static void ism_dev_release(struct device *dev)
+static void ism_dev_exit(struct ism_dev *ism)
{
- struct ism_dev *ism;
-
- ism = container_of(dev, struct ism_dev, dev);
+ struct pci_dev *pdev = ism->pdev;
- kfree(ism);
+ unregister_ieq(ism);
+ unregister_sba(ism);
+ free_irq(pci_irq_vector(pdev, 0), ism);
+ pci_free_irq_vectors(pdev);
}
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct dibs_dev *dibs;
+ struct zpci_dev *zdev;
struct ism_dev *ism;
int ret;
@@ -608,21 +603,13 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ism)
return -ENOMEM;
- spin_lock_init(&ism->lock);
spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
- ism->dev.parent = &pdev->dev;
- ism->dev.release = ism_dev_release;
- device_initialize(&ism->dev);
- dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
- ret = device_add(&ism->dev);
- if (ret)
- goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
- goto err;
+ goto err_dev;
ret = pci_request_mem_regions(pdev, DRV_NAME);
if (ret)
@@ -636,66 +623,69 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ ret = -ENOMEM;
+ goto err_resource;
+ }
+ /* set this up before we enable interrupts */
+ ism->dibs = dibs;
+ dibs->drv_priv = ism;
+ dibs->ops = &ism_ops;
+
+	/* Enable the ism device; any interrupts and events are ignored
+	 * until dibs_dev_add() exposes the device to clients.
+ */
ret = ism_dev_init(ism);
if (ret)
- goto err_resource;
+ goto err_dibs;
+
+	/* after ism_dev_init() we can call ism functions to set the gid */
+ ret = ism_read_local_gid(dibs);
+ if (ret)
+ goto err_ism;
+
+ dibs->dev.parent = &pdev->dev;
+
+ zdev = to_zpci(pdev);
+ dev_set_name(&dibs->dev, "ism%x", zdev->uid ? zdev->uid : zdev->fid);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_ism;
return 0;
+err_ism:
+ ism_dev_exit(ism);
+err_dibs:
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err:
- device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
return ret;
}
-static void ism_dev_exit(struct ism_dev *ism)
-{
- struct pci_dev *pdev = ism->pdev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ism->lock, flags);
- for (i = 0; i < max_client; ++i)
- ism->subs[i] = NULL;
- spin_unlock_irqrestore(&ism->lock, flags);
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i])
- clients[i]->remove(ism);
- }
- mutex_unlock(&clients_lock);
-
- if (ism_v2_capable)
- ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
- unregister_ieq(ism);
- unregister_sba(ism);
- free_irq(pci_irq_vector(pdev, 0), ism);
- kfree(ism->sba_client_arr);
- pci_free_irq_vectors(pdev);
- list_del_init(&ism->list);
- mutex_unlock(&ism_dev_list.mutex);
-}
-
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+ struct dibs_dev *dibs = ism->dibs;
+ dibs_dev_del(dibs);
ism_dev_exit(ism);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
- device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
}
static struct pci_driver ism_driver = {
@@ -713,8 +703,6 @@ static int __init ism_init(void)
if (!ism_debug_info)
return -ENODEV;
- memset(clients, 0, sizeof(clients));
- max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
@@ -731,150 +719,3 @@ static void __exit ism_exit(void)
module_init(ism_init);
module_exit(ism_exit);
-
-/*************************** SMC-D Implementation *****************************/
-
-#if IS_ENABLED(CONFIG_SMC)
-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
- u32 vid)
-{
- union ism_query_rgid cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_QUERY_RGID;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.vlan_valid = vid_valid;
- cmd.request.vlan_id = vid;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid)
-{
- return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
-}
-
-static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
- void *client)
-{
- return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
-}
-
-static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
-{
- return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
-}
-
-static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_add_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_del_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_set_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
-}
-
-static int smcd_reset_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
-}
-
-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
-{
- union ism_sig_ieq cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.trigger_irq = trigger_irq;
- cmd.request.event_code = event_code;
- cmd.request.info = info;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info)
-{
- return ism_signal_ieq(smcd->priv, rgid->gid,
- trigger_irq, event_code, info);
-}
-
-static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data,
- unsigned int size)
-{
- return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
-}
-
-static int smcd_supports_v2(void)
-{
- return ism_v2_capable;
-}
-
-static u64 ism_get_local_gid(struct ism_dev *ism)
-{
- return ism->local_gid;
-}
-
-static void smcd_get_local_gid(struct smcd_dev *smcd,
- struct smcd_gid *smcd_gid)
-{
- smcd_gid->gid = ism_get_local_gid(smcd->priv);
- smcd_gid->gid_ext = 0;
-}
-
-static u16 ism_get_chid(struct ism_dev *ism)
-{
- if (!ism || !ism->pdev)
- return 0;
-
- return to_zpci(ism->pdev)->pchid;
-}
-
-static u16 smcd_get_chid(struct smcd_dev *smcd)
-{
- return ism_get_chid(smcd->priv);
-}
-
-static inline struct device *smcd_get_dev(struct smcd_dev *dev)
-{
- struct ism_dev *ism = dev->priv;
-
- return &ism->dev;
-}
-
-static const struct smcd_ops ism_ops = {
- .query_remote_gid = smcd_query_rgid,
- .register_dmb = smcd_register_dmb,
- .unregister_dmb = smcd_unregister_dmb,
- .add_vlan_id = smcd_add_vlan_id,
- .del_vlan_id = smcd_del_vlan_id,
- .set_vlan_required = smcd_set_vlan_required,
- .reset_vlan_required = smcd_reset_vlan_required,
- .signal_event = smcd_signal_ieq,
- .move_data = smcd_move,
- .supports_v2 = smcd_supports_v2,
- .get_local_gid = smcd_get_local_gid,
- .get_chid = smcd_get_chid,
- .get_dev = smcd_get_dev,
-};
-
-const struct smcd_ops *ism_get_smcd_ops(void)
-{
- return &ism_ops;
-}
-EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
-#endif