Diffstat (limited to 'drivers/s390')
170 files changed, 10042 insertions, 12351 deletions
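A recurring change in the dasd.c hunks below is the move from the old blk_queue_* setters to the atomic queue_limits update API (queue_limits_start_update()/queue_limits_commit_update()). A minimal sketch of that pattern, reduced from the dasd_state_basic_to_ready() hunk — the helper name and its parameters here are illustrative, not part of the patch:

#include <linux/blkdev.h>

/* Hedged sketch: apply block size and transfer limits in one atomic update. */
static int sketch_update_limits(struct gendisk *disk,
				unsigned int block_size,
				unsigned int max_sectors)
{
	struct queue_limits lim;

	/* snapshot the queue's current limits */
	lim = queue_limits_start_update(disk->queue);
	lim.logical_block_size = block_size;
	lim.max_hw_sectors = max_sectors;
	/* buffers must be aligned to the logical block size */
	lim.dma_alignment = block_size - 1;
	/* validate and publish the new limits in one step */
	return queue_limits_commit_update(disk->queue, &lim);
}

The actual hunk additionally derives max_dev_sectors from the discipline's max_sectors() callback and folds the discard/write-zeroes limits into the same update before committing.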
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index e3710a762aba..877a9bc7f04b 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -4,10 +4,9 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m - select FS_DAX_LIMITED - select DAX prompt "DCSSBLK support" - depends on S390 && BLOCK + depends on S390 && BLOCK && ZONE_DEVICE + select FS_DAX help Support for dcss block device diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 215597f73be4..35031357ac4d 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -8,9 +8,7 @@ * Copyright IBM Corp. 1999, 2009 */ -#define KMSG_COMPONENT "dasd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - +#include <linux/export.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -24,15 +22,13 @@ #include <linux/seq_file.h> #include <linux/vmalloc.h> +#include <asm/machine.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> #include <asm/idals.h> #include <asm/itcw.h> #include <asm/diag.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd:" - #include "dasd_int.h" /* * SECTION: Constant definitions to be used within this file @@ -211,19 +207,6 @@ static int dasd_state_known_to_new(struct dasd_device *device) return 0; } -static struct dentry *dasd_debugfs_setup(const char *name, - struct dentry *base_dentry) -{ - struct dentry *pde; - - if (!base_dentry) - return NULL; - pde = debugfs_create_dir(name, base_dentry); - if (!pde || IS_ERR(pde)) - return NULL; - return pde; -} - /* * Request the irq line for the device. */ @@ -238,14 +221,14 @@ static int dasd_state_known_to_basic(struct dasd_device *device) if (rc) return rc; block->debugfs_dentry = - dasd_debugfs_setup(block->gdp->disk_name, + debugfs_create_dir(block->gdp->disk_name, dasd_debugfs_root_entry); dasd_profile_init(&block->profile, block->debugfs_dentry); if (dasd_global_profile_level == DASD_PROFILE_ON) dasd_profile_on(&device->block->profile); } device->debugfs_dentry = - dasd_debugfs_setup(dev_name(&device->cdev->dev), + debugfs_create_dir(dev_name(&device->cdev->dev), dasd_debugfs_root_entry); dasd_profile_init(&device->profile, device->debugfs_dentry); dasd_hosts_init(device->debugfs_dentry, device); @@ -313,39 +296,62 @@ static int dasd_state_basic_to_known(struct dasd_device *device) */ static int dasd_state_basic_to_ready(struct dasd_device *device) { - int rc; - struct dasd_block *block; - struct gendisk *disk; + struct dasd_block *block = device->block; + struct queue_limits lim; + int rc = 0; - rc = 0; - block = device->block; /* make disk known with correct capacity */ - if (block) { - if (block->base->discipline->do_analysis != NULL) - rc = block->base->discipline->do_analysis(block); - if (rc) { - if (rc != -EAGAIN) { - device->state = DASD_STATE_UNFMT; - disk = device->block->gdp; - kobject_uevent(&disk_to_dev(disk)->kobj, - KOBJ_CHANGE); - goto out; - } - return rc; - } - if (device->discipline->setup_blk_queue) - device->discipline->setup_blk_queue(block); - set_capacity(block->gdp, - block->blocks << block->s2b_shift); + if (!block) { device->state = DASD_STATE_READY; - rc = dasd_scan_partitions(block); - if (rc) { - device->state = DASD_STATE_BASIC; + goto out; + } + + if (block->base->discipline->do_analysis != NULL) + rc = block->base->discipline->do_analysis(block); + if (rc) { + if (rc == -EAGAIN) return rc; - } - } else { - device->state = DASD_STATE_READY; + device->state = DASD_STATE_UNFMT; + 
kobject_uevent(&disk_to_dev(device->block->gdp)->kobj, + KOBJ_CHANGE); + goto out; + } + + lim = queue_limits_start_update(block->gdp->queue); + lim.max_dev_sectors = device->discipline->max_sectors(block); + lim.max_hw_sectors = lim.max_dev_sectors; + lim.logical_block_size = block->bp_block; + /* + * Adjust dma_alignment to match block_size - 1 + * to ensure proper buffer alignment checks in the block layer. + */ + lim.dma_alignment = lim.logical_block_size - 1; + + if (device->discipline->has_discard) { + unsigned int max_bytes; + + lim.discard_granularity = block->bp_block; + + /* Calculate max_discard_sectors and make it PAGE aligned */ + max_bytes = USHRT_MAX * block->bp_block; + max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); + + lim.max_hw_discard_sectors = max_bytes / block->bp_block; + lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors; + } + rc = queue_limits_commit_update(block->gdp->queue, &lim); + if (rc) + return rc; + + set_capacity(block->gdp, block->blocks << block->s2b_shift); + device->state = DASD_STATE_READY; + + rc = dasd_scan_partitions(block); + if (rc) { + device->state = DASD_STATE_BASIC; + return rc; } + out: if (device->discipline->basic_to_ready) rc = device->discipline->basic_to_ready(device); @@ -412,7 +418,8 @@ dasd_state_ready_to_online(struct dasd_device * device) KOBJ_CHANGE); return 0; } - disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); + disk_uevent(file_bdev(device->block->bdev_file)->bd_disk, + KOBJ_CHANGE); } return 0; } @@ -432,7 +439,8 @@ static int dasd_state_online_to_ready(struct dasd_device *device) device->state = DASD_STATE_READY; if (device->block && !(device->features & DASD_FEATURE_USERAW)) - disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); + disk_uevent(file_bdev(device->block->bdev_file)->bd_disk, + KOBJ_CHANGE); return 0; } @@ -674,18 +682,20 @@ static void dasd_profile_start(struct dasd_block *block, * we count each request only once. 
*/ device = cqr->startdev; - if (device->profile.data) { - counter = 1; /* request is not yet queued on the start device */ - list_for_each(l, &device->ccw_queue) - if (++counter >= 31) - break; - } + if (!device->profile.data) + return; + + spin_lock(get_ccwdev_lock(device->cdev)); + counter = 1; /* request is not yet queued on the start device */ + list_for_each(l, &device->ccw_queue) + if (++counter >= 31) + break; + spin_unlock(get_ccwdev_lock(device->cdev)); + spin_lock(&device->profile.lock); - if (device->profile.data) { - device->profile.data->dasd_io_nr_req[counter]++; - if (rq_data_dir(req) == READ) - device->profile.data->dasd_read_nr_req[counter]++; - } + device->profile.data->dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + device->profile.data->dasd_read_nr_req[counter]++; spin_unlock(&device->profile.lock); } @@ -951,8 +961,7 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array) static void dasd_stats_seq_print(struct seq_file *m, struct dasd_profile_info *data) { - seq_printf(m, "start_time %lld.%09ld\n", - (s64)data->starttod.tv_sec, data->starttod.tv_nsec); + seq_printf(m, "start_time %ptSp\n", &data->starttod); seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); seq_printf(m, "total_pav %u\n", data->dasd_io_alias); @@ -1035,19 +1044,9 @@ static const struct file_operations dasd_stats_raw_fops = { static void dasd_profile_init(struct dasd_profile *profile, struct dentry *base_dentry) { - umode_t mode; - struct dentry *pde; - - if (!base_dentry) - return; - profile->dentry = NULL; profile->data = NULL; - mode = (S_IRUSR | S_IWUSR | S_IFREG); - pde = debugfs_create_file("statistics", mode, base_dentry, - profile, &dasd_stats_raw_fops); - if (pde && !IS_ERR(pde)) - profile->dentry = pde; - return; + profile->dentry = debugfs_create_file("statistics", 0600, base_dentry, + profile, &dasd_stats_raw_fops); } static void dasd_profile_exit(struct dasd_profile *profile) @@ -1067,25 +1066,9 @@ static void dasd_statistics_removeroot(void) static void dasd_statistics_createroot(void) { - struct dentry *pde; - - dasd_debugfs_root_entry = NULL; - pde = debugfs_create_dir("dasd", NULL); - if (!pde || IS_ERR(pde)) - goto error; - dasd_debugfs_root_entry = pde; - pde = debugfs_create_dir("global", dasd_debugfs_root_entry); - if (!pde || IS_ERR(pde)) - goto error; - dasd_debugfs_global_entry = pde; + dasd_debugfs_root_entry = debugfs_create_dir("dasd", NULL); + dasd_debugfs_global_entry = debugfs_create_dir("global", dasd_debugfs_root_entry); dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); - return; - -error: - DBF_EVENT(DBF_ERR, "%s", - "Creation of the dasd debugfs interface failed"); - dasd_statistics_removeroot(); - return; } #else @@ -1102,12 +1085,6 @@ static void dasd_statistics_removeroot(void) return; } -int dasd_stats_generic_show(struct seq_file *m, void *v) -{ - seq_puts(m, "Statistics are not activated in this kernel\n"); - return 0; -} - static void dasd_profile_init(struct dasd_profile *profile, struct dentry *base_dentry) { @@ -1152,17 +1129,8 @@ static void dasd_hosts_exit(struct dasd_device *device) static void dasd_hosts_init(struct dentry *base_dentry, struct dasd_device *device) { - struct dentry *pde; - umode_t mode; - - if (!base_dentry) - return; - - mode = S_IRUSR | S_IFREG; - pde = debugfs_create_file("host_access_list", mode, base_dentry, - device, &dasd_hosts_fops); - if (pde && !IS_ERR(pde)) - device->hosts_dentry = pde; + device->hosts_dentry = 
debugfs_create_file("host_access_list", 0400, base_dentry, + device, &dasd_hosts_fops); } struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, @@ -1303,7 +1271,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int retries, rc; - char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); @@ -1342,10 +1309,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) rc = 0; break; default: - /* internal error 10 - unknown rc*/ - snprintf(errorstring, ERRORLENGTH, "10 %d", rc); - dev_err(&device->cdev->dev, "An error occurred in the " - "DASD device driver, reason=%s\n", errorstring); + dev_err(&device->cdev->dev, + "Unexpected error during request termination %d\n", rc); BUG(); break; } @@ -1364,7 +1329,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int rc; - char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); @@ -1384,10 +1348,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) return -EPERM; } if (cqr->retries < 0) { - /* internal error 14 - start_IO run out of retries */ - sprintf(errorstring, "14 %p", cqr); - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", errorstring); + dev_err(&device->cdev->dev, + "Start I/O ran out of retries\n"); cqr->status = DASD_CQR_ERROR; return -EIO; } @@ -1465,11 +1427,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) "not accessible"); break; default: - /* internal error 11 - unknown rc */ - snprintf(errorstring, ERRORLENGTH, "11 %d", rc); dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unexpected error during request start %d", rc); BUG(); break; } @@ -1491,7 +1450,7 @@ static void dasd_device_timeout(struct timer_list *t) unsigned long flags; struct dasd_device *device; - device = from_timer(device, t, timer); + device = timer_container_of(device, t, timer); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); /* re-activate request queue */ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); @@ -1505,7 +1464,7 @@ static void dasd_device_timeout(struct timer_list *t) void dasd_device_set_timer(struct dasd_device *device, int expires) { if (expires == 0) - del_timer(&device->timer); + timer_delete(&device->timer); else mod_timer(&device->timer, jiffies + expires); } @@ -1516,7 +1475,7 @@ EXPORT_SYMBOL(dasd_device_set_timer); */ void dasd_device_clear_timer(struct dasd_device *device) { - del_timer(&device->timer); + timer_delete(&device->timer); } EXPORT_SYMBOL(dasd_device_clear_timer); @@ -1600,9 +1559,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) if (!sense) return 0; - return !!(sense[1] & SNS1_NO_REC_FOUND) || - !!(sense[1] & SNS1_FILE_PROTECTED) || - scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; + if (sense[1] & SNS1_NO_REC_FOUND) + return 1; + + if ((sense[1] & SNS1_INV_TRACK_FORMAT) && + scsw_is_tm(&irb->scsw) && + !(sense[2] & SNS2_ENV_DATA_PRESENT)) + return 1; + + return 0; } static int dasd_ese_oos_cond(u8 *sense) @@ -1623,7 +1588,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, struct dasd_device *device; unsigned long now; int nrf_suppressed = 0; - int fp_suppressed = 0; + int it_suppressed = 0; struct request *req; u8 *sense = NULL; int expires; @@ -1678,8 +1643,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, */ sense = dasd_get_sense(irb); if (sense) { - fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) && - 
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); + it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) && + !(sense[2] & SNS2_ENV_DATA_PRESENT) && + test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); @@ -1694,7 +1660,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } } - if (!(fp_suppressed || nrf_suppressed)) + if (!(it_suppressed || nrf_suppressed)) device->discipline->dump_sense_dbf(device, irb, "int"); if (device->features & DASD_FEATURE_ERPLOG) @@ -1906,8 +1872,6 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, static void __dasd_process_cqr(struct dasd_device *device, struct dasd_ccw_req *cqr) { - char errorstring[ERRORLENGTH]; - switch (cqr->status) { case DASD_CQR_SUCCESS: cqr->status = DASD_CQR_DONE; @@ -1919,11 +1883,8 @@ static void __dasd_process_cqr(struct dasd_device *device, cqr->status = DASD_CQR_TERMINATED; break; default: - /* internal error 12 - wrong cqr status*/ - snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unexpected CQR status %02x", cqr->status); BUG(); } if (cqr->callback) @@ -1988,16 +1949,14 @@ static void __dasd_device_check_expire(struct dasd_device *device) if (device->discipline->term_IO(cqr) != 0) { /* Hmpf, try again in 5 sec */ dev_err(&device->cdev->dev, - "cqr %p timed out (%lus) but cannot be " - "ended, retrying in 5 s\n", - cqr, (cqr->expires/HZ)); + "CQR timed out (%lus) but cannot be ended, retrying in 5s\n", + (cqr->expires / HZ)); cqr->expires += 5*HZ; dasd_device_set_timer(device, 5*HZ); } else { dev_err(&device->cdev->dev, - "cqr %p timed out (%lus), %i retries " - "remaining\n", cqr, (cqr->expires/HZ), - cqr->retries); + "CQR timed out (%lus), %i retries remaining\n", + (cqr->expires / HZ), cqr->retries); } __dasd_device_check_autoquiesce_timeout(device, cqr); } @@ -2116,10 +2075,9 @@ int dasd_flush_device_queue(struct dasd_device *device) case DASD_CQR_IN_IO: rc = device->discipline->term_IO(cqr); if (rc) { - /* unable to terminate requeust */ + /* unable to terminate request */ dev_err(&device->cdev->dev, - "Flushing the DASD request queue " - "failed for request %p\n", cqr); + "Flushing the DASD request queue failed\n"); /* stop flush processing */ goto finished; } @@ -2466,14 +2424,17 @@ retry: rc = 0; list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { /* - * In some cases the 'File Protected' or 'Incorrect Length' - * error might be expected and error recovery would be - * unnecessary in these cases. Check if the according suppress - * bit is set. + * In some cases certain errors might be expected and + * error recovery would be unnecessary in these cases. + * Check if the according suppress bit is set. 
*/ sense = dasd_get_sense(&cqr->irb); - if (sense && sense[1] & SNS1_FILE_PROTECTED && - test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) + if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && + !(sense[2] & SNS2_ENV_DATA_PRESENT) && + test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags)) + continue; + if (sense && (sense[1] & SNS1_NO_REC_FOUND) && + test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags)) continue; if (scsw_cstat(&cqr->irb.scsw) == 0x40 && test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) @@ -2635,8 +2596,7 @@ static int __dasd_cancel_req(struct dasd_ccw_req *cqr) rc = device->discipline->term_IO(cqr); if (rc) { dev_err(&device->cdev->dev, - "Cancelling request %p failed with rc=%d\n", - cqr, rc); + "Cancelling request failed with rc=%d\n", rc); } else { cqr->stopclk = get_tod_clock(); } @@ -2674,7 +2634,7 @@ static void dasd_block_timeout(struct timer_list *t) unsigned long flags; struct dasd_block *block; - block = from_timer(block, t, timer); + block = timer_container_of(block, t, timer); spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); /* re-activate request queue */ dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); @@ -2689,7 +2649,7 @@ static void dasd_block_timeout(struct timer_list *t) void dasd_block_set_timer(struct dasd_block *block, int expires) { if (expires == 0) - del_timer(&block->timer); + timer_delete(&block->timer); else mod_timer(&block->timer, jiffies + expires); } @@ -2700,7 +2660,7 @@ EXPORT_SYMBOL(dasd_block_set_timer); */ void dasd_block_clear_timer(struct dasd_block *block) { - del_timer(&block->timer); + timer_delete(&block->timer); } EXPORT_SYMBOL(dasd_block_clear_timer); @@ -3110,12 +3070,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, PTR_ERR(cqr) == -ENOMEM || PTR_ERR(cqr) == -EAGAIN) { rc = BLK_STS_RESOURCE; - goto out; + } else if (PTR_ERR(cqr) == -EINVAL) { + rc = BLK_STS_INVAL; + } else { + DBF_DEV_EVENT(DBF_ERR, basedev, + "CCW creation failed (rc=%ld) on request %p", + PTR_ERR(cqr), req); + rc = BLK_STS_IOERR; } - DBF_DEV_EVENT(DBF_ERR, basedev, - "CCW creation failed (rc=%ld) on request %p", - PTR_ERR(cqr), req); - rc = BLK_STS_IOERR; goto out; } /* @@ -3313,11 +3275,11 @@ static void dasd_release(struct gendisk *disk) /* * Return disk geometry. 
*/ -static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo) { struct dasd_device *base; - base = dasd_device_from_gendisk(bdev->bd_disk); + base = dasd_device_from_gendisk(disk); if (!base) return -ENODEV; @@ -3327,7 +3289,8 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return -EINVAL; } base->discipline->fill_geometry(base->block, geo); - geo->start = get_start_sect(bdev) >> base->block->s2b_shift; + // geo->start is left unchanged by the above + geo->start >>= base->block->s2b_shift; dasd_put_device(base); return 0; } @@ -3338,7 +3301,6 @@ dasd_device_operations = { .open = dasd_open, .release = dasd_release, .ioctl = dasd_ioctl, - .compat_ioctl = dasd_ioctl, .getgeo = dasd_getgeo, .set_read_only = dasd_set_read_only, }; @@ -3380,7 +3342,7 @@ int dasd_device_is_ro(struct dasd_device *device) struct diag210 diag_data; int rc; - if (!MACHINE_IS_VM) + if (!machine_is_vm()) return 0; ccw_device_get_id(device->cdev, &dev_id); memset(&diag_data, 0, sizeof(diag_data)); @@ -3404,8 +3366,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) ret = ccw_device_set_online(cdev); if (ret) - pr_warn("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); } /* @@ -3492,8 +3453,11 @@ int dasd_generic_set_online(struct ccw_device *cdev, { struct dasd_discipline *discipline; struct dasd_device *device; + struct device *dev; int rc; + dev = &cdev->dev; + /* first online clears initial online feature flag */ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); device = dasd_create_device(cdev); @@ -3506,11 +3470,10 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Try to load the required module. */ rc = request_module(DASD_DIAG_MOD); if (rc) { - pr_warn("%s Setting the DASD online failed " - "because the required module %s " - "could not be loaded (rc=%d)\n", - dev_name(&cdev->dev), DASD_DIAG_MOD, - rc); + dev_warn(dev, "Setting the DASD online failed " + "because the required module %s " + "could not be loaded (rc=%d)\n", + DASD_DIAG_MOD, rc); dasd_delete_device(device); return -ENODEV; } @@ -3518,8 +3481,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Module init could have failed, so check again here after * request_module(). 
*/ if (!dasd_diag_discipline_pointer) { - pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); dasd_delete_device(device); return -ENODEV; } @@ -3529,37 +3491,33 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_delete_device(device); return -EINVAL; } + device->base_discipline = base_discipline; if (!try_module_get(discipline->owner)) { - module_put(base_discipline->owner); dasd_delete_device(device); return -EINVAL; } - device->base_discipline = base_discipline; device->discipline = discipline; /* check_device will allocate block device if necessary */ rc = discipline->check_device(device); if (rc) { - pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", - dev_name(&cdev->dev), discipline->name, rc); - module_put(discipline->owner); - module_put(base_discipline->owner); + dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", + discipline->name, rc); dasd_delete_device(device); return rc; } dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) { - pr_warn("%s Setting the DASD online failed because of a missing discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); rc = -ENODEV; dasd_set_target_state(device, DASD_STATE_NEW); if (device->block) dasd_free_block(device->block); dasd_delete_device(device); - } else - pr_debug("dasd_generic device %s found\n", - dev_name(&cdev->dev)); + } else { + dev_dbg(dev, "dasd_generic device found\n"); + } wait_event(dasd_init_waitq, _wait_for_device(device)); @@ -3570,10 +3528,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); int dasd_generic_set_offline(struct ccw_device *cdev) { + int max_count, open_count, rc; struct dasd_device *device; struct dasd_block *block; - int max_count, open_count, rc; unsigned long flags; + struct device *dev; + + dev = &cdev->dev; rc = 0; spin_lock_irqsave(get_ccwdev_lock(cdev), flags); @@ -3590,15 +3551,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * in the other openers. */ if (device->block) { - max_count = device->block->bdev ? 0 : -1; + max_count = device->block->bdev_file ? 
0 : -1; open_count = atomic_read(&device->block->open_count); if (open_count > max_count) { if (open_count > 0) - pr_warn("%s: The DASD cannot be set offline with open count %i\n", - dev_name(&cdev->dev), open_count); + dev_warn(dev, "The DASD cannot be set offline with open count %i\n", + open_count); else - pr_warn("%s: The DASD cannot be set offline while it is in use\n", - dev_name(&cdev->dev)); + dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); rc = -EBUSY; goto out_err; } @@ -3636,8 +3596,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * so sync bdev first and then wait for our queues to become * empty */ - if (device->block) - bdev_mark_dead(device->block->bdev, false); + if (device->block && device->block->bdev_file) + bdev_mark_dead(file_bdev(device->block->bdev_file), false); dasd_schedule_device_bh(device); rc = wait_event_interruptible(shutdown_waitq, _wait_for_empty_queues(device)); @@ -3958,8 +3918,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device, if (dasd_eer_enabled(device)) dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE); - pr_info("%s: The DASD has been put in the quiesce state\n", - dev_name(&device->cdev->dev)); + dev_info(&device->cdev->dev, + "The DASD has been put in the quiesce state\n"); dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE); if (device->features & DASD_FEATURE_REQUEUEQUIESCE) @@ -3979,16 +3939,14 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, NULL); if (IS_ERR(cqr)) { - /* internal error 13 - Allocating the RDC request failed*/ - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "13"); + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate RDC request"); return cqr; } ccw = cqr->cpaddr; ccw->cmd_code = CCW_CMD_RDC; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); ccw->flags = 0; ccw->count = rdc_buffer_size; cqr->startdev = device; @@ -4032,7 +3990,7 @@ char *dasd_get_sense(struct irb *irb) if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { if (irb->scsw.tm.tcw) - tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw)); + tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw)); if (tsb && tsb->length == 64 && tsb->flags) switch (tsb->flags & 0x07) { case 1: /* tsa_iostat */ diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 89957bb7244d..d0aa267462c5 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -7,13 +7,9 @@ * */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/timer.h> #include <asm/idals.h> -#define PRINTK_HEADER "dasd_erp(3990): " - #include "dasd_int.h" #include "dasd_eckd.h" @@ -220,7 +216,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) memset(ccw, 0, sizeof(struct ccw1)); ccw->cmd_code = CCW_CMD_DCTL; ccw->count = 4; - ccw->cda = (__u32)virt_to_phys(DCTL_data); + ccw->cda = virt_to_dma32(DCTL_data); dctl_cqr->flags = erp->flags; dctl_cqr->function = dasd_3990_erp_DCTL; dctl_cqr->refers = erp; @@ -398,7 +394,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) struct dasd_device *device = erp->startdev; char msg_format = (sense[7] & 0xF0); char msg_no = (sense[7] & 0x0F); - char errorstring[ERRORLENGTH]; switch (msg_format) { case 0x00: /* Format 0 - Program or System Checks */ @@ -1004,12 +999,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) } break; - default: /* unknown message format - should not happen - 
internal error 03 - unknown message format */ - snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format); + default: dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unknown message format %02x", msg_format); break; } /* end switch message format */ @@ -1056,11 +1048,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } else { - /* fatal error - set status to FAILED - internal error 09 - Command Reject */ if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags)) dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, reason=09\n"); + "An I/O command request was rejected\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } @@ -1128,13 +1118,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) erp->function = dasd_3990_erp_equip_check; if (sense[1] & SNS1_WRITE_INHIBITED) { - dev_info(&device->cdev->dev, - "Write inhibited path encountered\n"); - - /* vary path offline - internal error 04 - Path should be varied off-line.*/ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "04"); + dev_err(&device->cdev->dev, "Write inhibited path encountered\n"); erp = dasd_3990_erp_action_1(erp); @@ -1285,11 +1269,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_action_4(erp, sense); } else { - /* internal error 06 - The track format is not valid*/ - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "06"); - + dev_err(&device->cdev->dev, "Track format is not valid\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } @@ -1406,14 +1386,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) struct dasd_device *device = erp->startdev; - /* - * In some cases the 'File Protected' error might be expected and - * log messages shouldn't be written then. - * Check if the according suppress bit is set. 
- */ - if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags)) - dev_err(&device->cdev->dev, - "Accessing the DASD failed because of a hardware error\n"); + dev_err(&device->cdev->dev, + "Accessing the DASD failed because of a hardware error\n"); return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); @@ -1609,7 +1583,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) { struct dasd_device *device = default_erp->startdev; - __u32 cpa = 0; + dma32_t cpa = 0; struct dasd_ccw_req *cqr; struct dasd_ccw_req *erp; struct DE_eckd_data *DE_data; @@ -1663,9 +1637,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) sizeof(struct LO_eckd_data), device); if (IS_ERR(erp)) { - /* internal error 01 - Unable to allocate ERP */ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "01"); + DBF_DEV_EVENT(DBF_ERR, device, "%s", + "Unable to allocate ERP request (1B 32)"); return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); } @@ -1714,7 +1687,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; ccw->flags = CCW_FLAG_CC; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(DE_data); + ccw->cda = virt_to_dma32(DE_data); /* create LO ccw */ ccw++; @@ -1722,7 +1695,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; ccw->flags = CCW_FLAG_CC; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(LO_data); + ccw->cda = virt_to_dma32(LO_data); /* TIC to the failed ccw */ ccw++; @@ -1768,7 +1741,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) { struct dasd_device *device = previous_erp->startdev; - __u32 cpa = 0; + dma32_t cpa = 0; struct dasd_ccw_req *cqr; struct dasd_ccw_req *erp; char *LO_data; /* struct LO_eckd_data */ @@ -1807,10 +1780,8 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) cpa = previous_erp->irb.scsw.cmd.cpa; if (cpa == 0) { - /* internal error 02 - - Unable to determine address of the CCW to be restarted */ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "02"); + dev_err(&device->cdev->dev, + "Unable to determine address of to be restarted CCW\n"); previous_erp->status = DASD_CQR_FAILED; @@ -2009,15 +1980,9 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense) { if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { - - /* set to suspended duplex state then restart - internal error 05 - Set device to suspended duplex state - should be done */ struct dasd_device *device = erp->startdev; dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "05"); - + "Compound configuration error occurred\n"); } erp->function = dasd_3990_erp_compound_config; @@ -2153,10 +2118,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_int_req(erp); break; - case 0x0F: /* length mismatch during update write command - internal error 08 - update write command error*/ - dev_err(&device->cdev->dev, "An error occurred in the " - "DASD device driver, reason=%s\n", "08"); + case 0x0F: + dev_err(&device->cdev->dev, + "Update write command error occurred\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); break; @@ -2165,12 +2129,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_action_10_32(erp, sense); break; - case 0x15: /* next track outside defined 
extend - internal error 07 - The next track is not - within the defined storage extent */ + case 0x15: dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "07"); + "Track outside defined extent error occurred\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); break; @@ -2419,7 +2380,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) tcw = erp->cpaddr; tsb = (struct tsb *) &tcw[1]; *tcw = *((struct tcw *)cqr->cpaddr); - tcw->tsb = virt_to_phys(tsb); + tcw->tsb = virt_to_dma64(tsb); } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) { /* PSF cannot be chained from NOOP/TIC */ erp->cpaddr = cqr->cpaddr; @@ -2430,7 +2391,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) ccw->flags = CCW_FLAG_CC; ccw++; ccw->cmd_code = CCW_CMD_TIC; - ccw->cda = (__u32)virt_to_phys(cqr->cpaddr); + ccw->cda = virt_to_dma32(cqr->cpaddr); } erp->flags = cqr->flags; @@ -2663,7 +2624,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) * necessary */ dev_err(&device->cdev->dev, - "ERP %p has run out of retries and failed\n", erp); + "ERP %px has run out of retries and failed\n", erp); erp->status = DASD_CQR_FAILED; } @@ -2704,8 +2665,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, while (erp_done != erp) { if (erp_done == NULL) /* end of chain reached */ - panic(PRINTK_HEADER "Programming error in ERP! The " - "original request was lost\n"); + panic("Programming error in ERP! The original request was lost\n"); /* remove the request from the device queue */ list_del(&erp_done->blocklist); @@ -2786,11 +2746,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) "ERP chain at BEGINNING of ERP-ACTION\n"); for (temp_erp = cqr; temp_erp != NULL; temp_erp = temp_erp->refers) { - dev_err(&device->cdev->dev, - "ERP %p (%02x) refers to %p\n", - temp_erp, temp_erp->status, - temp_erp->refers); + "ERP %px (%02x) refers to %px\n", + temp_erp, temp_erp->status, temp_erp->refers); } } @@ -2837,11 +2795,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) "ERP chain at END of ERP-ACTION\n"); for (temp_erp = erp; temp_erp != NULL; temp_erp = temp_erp->refers) { - dev_err(&device->cdev->dev, - "ERP %p (%02x) refers to %p\n", - temp_erp, temp_erp->status, - temp_erp->refers); + "ERP %px (%02x) refers to %px\n", + temp_erp, temp_erp->status, temp_erp->refers); } } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index c9740ae88d1a..f7e768d8ca76 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -6,20 +6,12 @@ * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/list.h> #include <linux/slab.h> #include <asm/ebcdic.h> #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eckd):" - - /* * General concept of alias management: * - PAV and DASD alias management is specific to the eckd discipline. 
@@ -443,7 +435,7 @@ static int read_unit_address_configuration(struct dasd_device *device, ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = sizeof(struct dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - feature codes */ memset(lcu->uac, 0, sizeof(*(lcu->uac))); @@ -451,7 +443,7 @@ static int read_unit_address_configuration(struct dasd_device *device, ccw++; ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(*(lcu->uac)); - ccw->cda = (__u32)virt_to_phys(lcu->uac); + ccw->cda = virt_to_dma32(lcu->uac); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -747,7 +739,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, ccw->cmd_code = DASD_ECKD_CCW_RSCK; ccw->flags = CCW_FLAG_SLI; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); ((char *)cqr->data)[0] = reason; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 620fab01b710..73972900fc55 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -13,19 +13,17 @@ * */ -#define KMSG_COMPONENT "dasd" - +#include <linux/export.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <asm/machine.h> #include <asm/debug.h> #include <linux/uaccess.h> #include <asm/ipl.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_devmap:" #define DASD_MAX_PARAMS 256 #include "dasd_int.h" @@ -238,7 +236,7 @@ static int __init dasd_parse_keyword(char *keyword) return 0; } if (strncmp("nopav", keyword, length) == 0) { - if (MACHINE_IS_VM) + if (machine_is_vm()) pr_info("'nopav' is not supported on z/VM\n"); else { dasd_nopav = 1; @@ -357,7 +355,8 @@ static int __init dasd_parse_range(const char *range) /* each device in dasd= parameter should be set initially online */ features |= DASD_FEATURE_INITIAL_ONLINE; while (from <= to) { - sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++); + scnprintf(bus_id, sizeof(bus_id), + "%01x.%01x.%04x", from_id0, from_id1, from++); devmap = dasd_add_busid(bus_id, features); if (IS_ERR(devmap)) { rc = PTR_ERR(devmap); @@ -859,7 +858,7 @@ dasd_delete_device(struct dasd_device *device) dev_set_drvdata(&device->cdev->dev, NULL); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); - /* Removve copy relation */ + /* Remove copy relation */ dasd_devmap_delete_copy_relation_device(device); /* * Drop ref_count by 3, one for the devmap reference, one for @@ -1114,7 +1113,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf) use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0; else use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0; - return sprintf(buf, use_diag ? "1\n" : "0\n"); + return sysfs_emit(buf, use_diag ? "1\n" : "0\n"); } static ssize_t @@ -1163,7 +1162,7 @@ dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf) use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0; else use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0; - return sprintf(buf, use_raw ? "1\n" : "0\n"); + return sysfs_emit(buf, use_raw ? 
"1\n" : "0\n"); } static ssize_t @@ -1259,7 +1258,7 @@ dasd_access_show(struct device *dev, struct device_attribute *attr, if (count < 0) return count; - return sprintf(buf, "%d\n", count); + return sysfs_emit(buf, "%d\n", count); } static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL); @@ -1338,19 +1337,19 @@ static ssize_t dasd_alias_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) { if (uid.type == UA_BASE_PAV_ALIAS || uid.type == UA_HYPER_PAV_ALIAS) { dasd_put_device(device); - return sprintf(buf, "1\n"); + return sysfs_emit(buf, "1\n"); } } dasd_put_device(device); - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); } static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL); @@ -1378,16 +1377,12 @@ static ssize_t dasd_vendor_show(struct device *dev, static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL); -#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\ - /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 +\ - /* vduit */ 32 + 1) - static ssize_t dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) { + char uid_string[DASD_UID_STRLEN]; struct dasd_device *device; struct dasd_uid uid; - char uid_string[UID_STRLEN]; char ua_string[3]; device = dasd_device_from_cdev(to_ccwdev(dev)); @@ -1416,15 +1411,9 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) break; } - if (strlen(uid.vduit) > 0) - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s.%s", - uid.vendor, uid.serial, uid.ssid, ua_string, - uid.vduit); - else - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s", - uid.vendor, uid.serial, uid.ssid, ua_string); + snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s%s%s", + uid.vendor, uid.serial, uid.ssid, ua_string, + uid.vduit[0] ? "." 
: "", uid.vduit); } dasd_put_device(device); @@ -1866,7 +1855,7 @@ static ssize_t dasd_pm_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); opm = dasd_path_get_opm(device); nppm = dasd_path_get_nppm(device); @@ -1876,8 +1865,8 @@ static ssize_t dasd_pm_show(struct device *dev, ifccpm = dasd_path_get_ifccpm(device); dasd_put_device(device); - return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, - cablepm, cuirpm, hpfpm, ifccpm); + return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, + cablepm, cuirpm, hpfpm, ifccpm); } static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL); @@ -2262,13 +2251,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev, /* allocate primary devmap if needed */ prim_devmap = dasd_find_busid(prim_busid); - if (IS_ERR(prim_devmap)) + if (IS_ERR(prim_devmap)) { prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(prim_devmap)) + return PTR_ERR(prim_devmap); + } /* allocate secondary devmap if needed */ sec_devmap = dasd_find_busid(sec_busid); - if (IS_ERR(sec_devmap)) + if (IS_ERR(sec_devmap)) { sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(sec_devmap)) + return PTR_ERR(sec_devmap); + } /* setting copy relation is only allowed for offline secondary */ if (sec_devmap->device) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 2e4e555b37c3..56f1af8a7ddd 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -8,8 +8,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/kernel_stat.h> #include <linux/stddef.h> #include <linux/kernel.h> @@ -20,6 +18,7 @@ #include <linux/init.h> #include <linux/jiffies.h> #include <asm/asm-extable.h> +#include <asm/machine.h> #include <asm/dasd.h> #include <asm/debug.h> #include <asm/diag.h> @@ -27,12 +26,12 @@ #include <linux/io.h> #include <asm/irq.h> #include <asm/vtoc.h> +#include <asm/asm.h> #include "dasd_int.h" #include "dasd_diag.h" -#define PRINTK_HEADER "dasd(diag):" - +MODULE_DESCRIPTION("S/390 Support for DIAG access to DASD Disks"); MODULE_LICENSE("GPL"); /* The maximum number of blocks per request (max_blocks) is dependent on the @@ -70,22 +69,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */ static inline int __dia250(void *iob, int cmd) { union register_pair rx = { .even = (unsigned long)iob, }; + int cc, exception; typedef union { struct dasd_diag_init_io init_io; struct dasd_diag_rw_io rw_io; } addr_type; - int cc; - cc = 3; - asm volatile( + exception = 1; + asm_inline volatile( " diag %[rx],%[cmd],0x250\n" - "0: ipm %[cc]\n" - " srl %[cc],28\n" + "0: lhi %[exc],0\n" "1:\n" + CC_IPM(cc) EX_TABLE(0b,1b) - : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob) + : CC_OUT(cc, cc), [rx] "+d" (rx.pair), + "+m" (*(addr_type *)iob), [exc] "+d" (exception) : [cmd] "d" (cmd) - : "cc"); + : CC_CLOBBER); + cc = exception ? 3 : CC_TRANSFORM(cc); return cc | rx.odd; } @@ -621,25 +622,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "dump sense not available for DIAG data"); } -/* - * Initialize block layer request queue. 
- */ -static void dasd_diag_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_diag_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - int max; - - max = DIAG_MAX_BLOCKS << block->s2b_shift; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - blk_queue_dma_alignment(q, PAGE_SIZE - 1); + return DIAG_MAX_BLOCKS << block->s2b_shift; } static int dasd_diag_pe_handler(struct dasd_device *device, @@ -652,10 +637,10 @@ static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", + .max_sectors = dasd_diag_max_sectors, .check_device = dasd_diag_check_device, .pe_handler = dasd_diag_pe_handler, .fill_geometry = dasd_diag_fill_geometry, - .setup_blk_queue = dasd_diag_setup_blk_queue, .start_IO = dasd_start_diag, .term_IO = dasd_diag_term_IO, .handle_terminated_request = dasd_diag_handle_terminated_request, @@ -670,7 +655,7 @@ static struct dasd_discipline dasd_diag_discipline = { static int __init dasd_diag_init(void) { - if (!MACHINE_IS_VM) { + if (!machine_is_vm()) { pr_info("Discipline %s cannot be used without z/VM\n", dasd_diag_discipline.name); return -ENODEV; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 8587e423169e..b08e900687f3 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -10,21 +10,19 @@ * Author.........: Nigel Hislop <hislop_nigel@emc.com> */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/bio.h> #include <linux/module.h> -#include <linux/compat.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/css_chars.h> +#include <asm/machine.h> #include <asm/debug.h> #include <asm/idals.h> #include <asm/ebcdic.h> @@ -37,11 +35,6 @@ #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eckd):" - /* * raw track access always map to 64k in memory * so it maps to 16 blocks of 4k per track @@ -51,6 +44,7 @@ /* 64k are 128 x 512 byte sectors */ #define DASD_RAW_SECTORS_PER_TRACK 128 +MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver"); MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_eckd_discipline; @@ -290,7 +284,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk, ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; ccw->flags = 0; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); } memset(data, 0, sizeof(struct DE_eckd_data)); @@ -400,7 +394,7 @@ static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data, ccw->count = 22; else ccw->count = 20; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); } memset(data, 0, sizeof(*data)); @@ -546,11 +540,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, ccw->flags = 0; if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) { ccw->count = sizeof(*pfxdata) + 2; - ccw->cda = 
(__u32)virt_to_phys(pfxdata); + ccw->cda = virt_to_dma32(pfxdata); memset(pfxdata, 0, sizeof(*pfxdata) + 2); } else { ccw->count = sizeof(*pfxdata); - ccw->cda = (__u32)virt_to_phys(pfxdata); + ccw->cda = virt_to_dma32(pfxdata); memset(pfxdata, 0, sizeof(*pfxdata)); } @@ -617,7 +611,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk, ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; ccw->flags = 0; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); memset(data, 0, sizeof(struct LO_eckd_data)); sector = 0; @@ -832,7 +826,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device, ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_RCD; ccw->flags = 0; - ccw->cda = (__u32)virt_to_phys(rcd_buffer); + ccw->cda = virt_to_dma32(rcd_buffer); ccw->count = DASD_ECKD_RCD_DATA_SIZE; cqr->magic = DASD_ECKD_MAGIC; @@ -860,7 +854,7 @@ static void read_conf_cb(struct dasd_ccw_req *cqr, void *data) if (cqr->status != DASD_CQR_DONE) { ccw = cqr->cpaddr; - rcd_buffer = phys_to_virt(ccw->cda); + rcd_buffer = dma32_to_virt(ccw->cda); memset(rcd_buffer, 0, sizeof(*rcd_buffer)); rcd_buffer[0] = 0xE5; @@ -1072,29 +1066,21 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device) } } -static void dasd_eckd_get_uid_string(struct dasd_conf *conf, - char *print_uid) +static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid) { struct dasd_uid uid; create_uid(conf, &uid); - if (strlen(uid.vduit) > 0) - snprintf(print_uid, sizeof(*print_uid), - "%s.%s.%04x.%02x.%s", - uid.vendor, uid.serial, uid.ssid, - uid.real_unit_addr, uid.vduit); - else - snprintf(print_uid, sizeof(*print_uid), - "%s.%s.%04x.%02x", - uid.vendor, uid.serial, uid.ssid, - uid.real_unit_addr); + snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s", + uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr, + uid.vduit[0] ? "." 
: "", uid.vduit); } static int dasd_eckd_check_cabling(struct dasd_device *device, void *conf_data, __u8 lpm) { + char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN]; struct dasd_eckd_private *private = device->private; - char print_path_uid[60], print_device_uid[60]; struct dasd_conf path_conf; path_conf.data = conf_data; @@ -1293,9 +1279,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device, __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE]; __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm; struct dasd_conf_data *conf_data; + char print_uid[DASD_UID_STRLEN]; struct dasd_conf path_conf; unsigned long flags; - char print_uid[60]; int rc, pos; opm = 0; @@ -1549,7 +1535,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = sizeof(struct dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - feature codes */ features = (struct dasd_rssd_features *) (prssdp + 1); @@ -1558,7 +1544,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) ccw++; ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(struct dasd_rssd_features); - ccw->cda = (__u32)virt_to_phys(features); + ccw->cda = virt_to_dma32(features); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -1618,7 +1604,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = sizeof(*prssdp); ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - Volume Storage Query */ vsq = (struct dasd_rssd_vsq *)(prssdp + 1); @@ -1628,7 +1614,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(*vsq); ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(vsq); + ccw->cda = virt_to_dma32(vsq); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -1803,7 +1789,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = sizeof(*prssdp); ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); lcq = (struct dasd_rssd_lcq *)(prssdp + 1); memset(lcq, 0, sizeof(*lcq)); @@ -1812,7 +1798,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(*lcq); ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(lcq); + ccw->cda = virt_to_dma32(lcq); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -1909,7 +1895,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, } ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_PSF; - ccw->cda = (__u32)virt_to_phys(psf_ssc_data); + ccw->cda = virt_to_dma32(psf_ssc_data); ccw->count = 66; cqr->startdev = device; @@ -1967,7 +1953,7 @@ static int dasd_eckd_validate_server(struct dasd_device *device, if (private->uid.type == UA_BASE_PAV_ALIAS || private->uid.type == UA_HYPER_PAV_ALIAS) return 0; - if (dasd_nopav || MACHINE_IS_VM) + if (dasd_nopav || machine_is_vm()) enable_pav = 0; else enable_pav = 1; @@ -2265,7 +2251,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; ccw->flags = 0; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(count_data); + ccw->cda = virt_to_dma32(count_data); ccw++; count_data++; } @@ -2279,7 +2265,7 @@ 
dasd_eckd_analysis_ccw(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; ccw->flags = 0; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(count_data); + ccw->cda = virt_to_dma32(count_data); cqr->block = NULL; cqr->startdev = device; @@ -2289,6 +2275,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cqr->status = DASD_CQR_FILLED; /* Set flags to suppress output for expected errors */ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); return cqr; } @@ -2418,7 +2405,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) } if (count_area != NULL && count_area->kl == 0) { - /* we found notthing violating our disk layout */ + /* we found nothing violating our disk layout */ if (dasd_check_blocksize(count_area->dl) == 0) block->bp_block = count_area->dl; } @@ -2570,7 +2557,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* Set flags to suppress output for expected errors */ - set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); return cqr; @@ -2650,7 +2636,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(fmt_buffer); + ccw->cda = virt_to_dma32(fmt_buffer); ccw++; fmt_buffer++; } @@ -2860,7 +2846,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(ect); + ccw->cda = virt_to_dma32(ect); ccw++; } if ((intensity & ~0x08) & 0x04) { /* erase track */ @@ -2875,7 +2861,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(ect); + ccw->cda = virt_to_dma32(ect); } else { /* write remaining records */ for (i = 0; i < rpt; i++) { ect = (struct eckd_count *) data; @@ -2910,7 +2896,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, DASD_ECKD_CCW_WRITE_CKD_MT; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(ect); + ccw->cda = virt_to_dma32(ect); ccw++; } } @@ -3851,7 +3837,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, } ccw = cqr->cpaddr; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); ccw->cmd_code = DASD_ECKD_CCW_DSO; ccw->count = size; @@ -3976,7 +3962,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( unsigned int blksize) { struct dasd_eckd_private *private; - unsigned long *idaws; + dma64_t *idaws; struct LO_eckd_data *LO_data; struct dasd_ccw_req *cqr; struct ccw1 *ccw; @@ -4054,8 +4040,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( dasd_sfree_request(cqr, startdev); return ERR_PTR(-EAGAIN); } - idaws = (unsigned long *) (cqr->data + - sizeof(struct PFX_eckd_data)); + idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); } else { if (define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, basedev, 0) == -EAGAIN) { @@ -4065,8 +4050,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( dasd_sfree_request(cqr, startdev); return ERR_PTR(-EAGAIN); } - idaws = (unsigned long *) (cqr->data + - sizeof(struct DE_eckd_data)); + idaws = (dma64_t *)(cqr->data + sizeof(struct 
DE_eckd_data)); } /* Build locate_record+read/write/ccws. */ LO_data = (struct LO_eckd_data *) (idaws + cidaw); @@ -4120,11 +4104,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( ccw->cmd_code = rcmd; ccw->count = count; if (idal_is_needed(dst, blksize)) { - ccw->cda = (__u32)virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags = CCW_FLAG_IDA; idaws = idal_create_words(idaws, dst, blksize); } else { - ccw->cda = (__u32)virt_to_phys(dst); + ccw->cda = virt_to_dma32(dst); ccw->flags = 0; } ccw++; @@ -4146,8 +4130,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( /* Set flags to suppress output for expected errors */ if (dasd_eckd_is_ese(basedev)) { - set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); - set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); } @@ -4167,7 +4149,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( unsigned int blk_per_trk, unsigned int blksize) { - unsigned long *idaws; + dma64_t *idaws; struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; @@ -4237,7 +4219,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( * (or 2K blocks on 31-bit) * - the scope of a ccw and it's idal ends with the track boundaries */ - idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); + idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); recid = first_rec; new_track = 1; end_idaw = 0; @@ -4258,7 +4240,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( ccw[-1].flags |= CCW_FLAG_CC; ccw->cmd_code = cmd; ccw->count = len_to_track_end; - ccw->cda = (__u32)virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags = CCW_FLAG_IDA; ccw++; recid += count; @@ -4274,7 +4256,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( * idaw ends */ if (!idaw_dst) { - if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) { + if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) { dasd_sfree_request(cqr, startdev); return ERR_PTR(-ERANGE); } else @@ -4294,7 +4276,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( * idal_create_words will handle cases where idaw_len * is larger then IDA_BLOCK_SIZE */ - if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1))) + if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1))) end_idaw = 1; /* We also need to end the idaw at track end */ if (!len_to_track_end) { @@ -4578,9 +4560,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( len_to_track_end = 0; /* * A tidaw can address 4k of memory, but must not cross page boundaries - * We can let the block layer handle this by setting - * blk_queue_segment_boundary to page boundaries and - * blk_max_segment_size to page size when setting up the request queue. + * We can let the block layer handle this by setting seg_boundary_mask + * to page boundaries and max_segment_size to page size when setting up + * the request queue. * For write requests, a TIDAW must not cross track boundaries, because * we have to set the CBC flag on the last tidaw for each track. 
*/ @@ -4649,9 +4631,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( /* Set flags to suppress output for expected errors */ if (dasd_eckd_is_ese(basedev)) { - set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); - set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags); } return cqr; @@ -4753,11 +4734,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, struct req_iterator iter; struct dasd_ccw_req *cqr; unsigned int trkcount; - unsigned long *idaws; unsigned int size; unsigned char cmd; struct bio_vec bv; struct ccw1 *ccw; + dma64_t *idaws; int use_prefix; void *data; char *dst; @@ -4838,7 +4819,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, trkcount, cmd, basedev, 0, 0); } - idaws = (unsigned long *)(cqr->data + size); + idaws = (dma64_t *)(cqr->data + size); len_to_track_end = 0; if (start_padding_sectors) { ccw[-1].flags |= CCW_FLAG_CC; @@ -4847,7 +4828,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, ccw->count = 57326; /* 64k map to one track */ len_to_track_end = 65536 - start_padding_sectors * 512; - ccw->cda = (__u32)virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags |= CCW_FLAG_IDA; ccw->flags |= CCW_FLAG_SLI; ccw++; @@ -4866,7 +4847,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, ccw->count = 57326; /* 64k map to one track */ len_to_track_end = 65536; - ccw->cda = (__u32)virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags |= CCW_FLAG_IDA; ccw->flags |= CCW_FLAG_SLI; ccw++; @@ -4923,9 +4904,9 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) - cda = *((char **)phys_to_virt(ccw->cda)); + cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda))); else - cda = phys_to_virt(ccw->cda); + cda = dma32_to_virt(ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) memcpy(dst, cda, bv.bv_len); @@ -5075,7 +5056,7 @@ dasd_eckd_release(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_RELEASE; ccw->flags |= CCW_FLAG_SLI; ccw->count = 32; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); cqr->startdev = device; cqr->memdev = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); @@ -5130,7 +5111,7 @@ dasd_eckd_reserve(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_RESERVE; ccw->flags |= CCW_FLAG_SLI; ccw->count = 32; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); cqr->startdev = device; cqr->memdev = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); @@ -5184,7 +5165,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_SLCK; ccw->flags |= CCW_FLAG_SLI; ccw->count = 32; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); cqr->startdev = device; cqr->memdev = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); @@ -5245,7 +5226,7 @@ static int dasd_eckd_snid(struct dasd_device *device, ccw->cmd_code = DASD_ECKD_CCW_SNID; ccw->flags |= CCW_FLAG_SLI; ccw->count = 12; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); cqr->startdev = device; cqr->memdev = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); @@ -5312,7 +5293,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = sizeof(struct 
dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - Performance Statistics */ stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); @@ -5321,7 +5302,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) ccw++; ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(struct dasd_rssd_perf_stats_t); - ccw->cda = (__u32)virt_to_phys(stats); + ccw->cda = virt_to_dma32(stats); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -5407,16 +5388,6 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) rc = -EFAULT; if (copy_from_user(&usrparm, argp, sizeof(usrparm))) goto out; - if (is_compat_task()) { - /* Make sure pointers are sane even on 31 bit. */ - rc = -EINVAL; - if ((usrparm.psf_data >> 32) != 0) - goto out; - if ((usrparm.rssd_result >> 32) != 0) - goto out; - usrparm.psf_data &= 0x7fffffffULL; - usrparm.rssd_result &= 0x7fffffffULL; - } /* at least 2 bytes are accessed and should be allocated */ if (usrparm.psf_data_len < 2) { DBF_DEV_EVENT(DBF_WARNING, device, @@ -5465,7 +5436,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->count = usrparm.psf_data_len; ccw->flags |= CCW_FLAG_CC; - ccw->cda = (__u32)virt_to_phys(psf_data); + ccw->cda = virt_to_dma32(psf_data); ccw++; @@ -5473,7 +5444,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = usrparm.rssd_result_len; ccw->flags = CCW_FLAG_SLI ; - ccw->cda = (__u32)virt_to_phys(rssd_result); + ccw->cda = virt_to_dma32(rssd_result); rc = dasd_sleep_on(cqr); if (rc) @@ -5529,22 +5500,22 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) * and return number of printed chars. 
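The dump helpers below gain a device argument so they can report through dev_err() instead of raw printk() with PRINTK_HEADER, and they now translate channel addresses through the typed dma32/dma64 helpers. A CCW's cda field holds a 31-bit channel address; when CCW_FLAG_IDA is set it points at an indirect-data-address list whose entries are 64-bit, hence the double translation. A sketch of the recovery step (ccw_data_pointer is a hypothetical name, the real code open-codes this inside dasd_eckd_dump_ccw_range()):

    static char *ccw_data_pointer(struct ccw1 *ccw)
    {
        if (ccw->flags & CCW_FLAG_IDA)
            /* cda points at an IDAL; its entries are dma64 addresses */
            return dma64_to_virt(*(dma64_t *)dma32_to_virt(ccw->cda));
        /* otherwise cda addresses the data buffer directly */
        return dma32_to_virt(ccw->cda);
    }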
*/ static void -dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) +dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from, + struct ccw1 *to, char *page) { int len, count; char *datap; len = 0; while (from <= to) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", from, ((int *) from)[0], ((int *) from)[1]); /* get pointer to data (consider IDALs) */ if (from->flags & CCW_FLAG_IDA) - datap = (char *)*((addr_t *)phys_to_virt(from->cda)); + datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda))); else - datap = phys_to_virt(from->cda); + datap = dma32_to_virt(from->cda); /* dump data (max 128 bytes) */ for (count = 0; count < from->count && count < 128; count++) { @@ -5560,7 +5531,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) from++; } if (len > 0) - printk(KERN_ERR "%s", page); + dev_err(&device->cdev->dev, "%s", page); } static void @@ -5591,9 +5562,12 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { - char *page; struct ccw1 *first, *last, *fail, *from, *to; + struct device *dev; int len, sl, sct; + char *page; + + dev = &device->cdev->dev; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { @@ -5602,24 +5576,18 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " - "CS:%02X RC:%d\n", + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, + "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n", req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), req ? req->intrc : 0); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing CCW: %p\n", - dev_name(&device->cdev->dev), - phys_to_virt(irb->scsw.cmd.cpa)); + len += sprintf(page + len, "Failing CCW: %px\n", + dma32_to_virt(irb->scsw.cmd.cpa)); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { @@ -5631,23 +5599,20 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, if (irb->ecw[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 24 Byte: %x MSG %x, " - "%s MSGb to SYSOP\n", + sprintf(page + len, + "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, irb->ecw[1] & 0x10 ? 
"" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 32 Byte: Format: %x " - "Exception class %x\n", + sprintf(page + len, + "32 Byte: Format: %x Exception class %x\n", irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); if (req) { /* req == NULL for unsolicited interrupts */ @@ -5656,28 +5621,28 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, first = req->cpaddr; for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); to = min(first + 6, last); - printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req); - dasd_eckd_dump_ccw_range(first, to, page); + dev_err(dev, "Related CP in req: %px\n", req); + dasd_eckd_dump_ccw_range(device, first, to, page); /* print failing CCW area (maximum 4) */ /* scsw->cda is either valid or zero */ from = ++to; - fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ + fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ if (from < fail - 2) { from = fail - 2; /* there is a gap - print header */ - printk(KERN_ERR PRINTK_HEADER "......\n"); + dev_err(dev, "......\n"); } to = min(fail + 1, last); - dasd_eckd_dump_ccw_range(from, to, page + len); + dasd_eckd_dump_ccw_range(device, from, to, page + len); /* print last CCWs (maximum 2) */ len = 0; from = max(from, ++to); if (from < last - 1) { from = last - 1; /* there is a gap - print header */ - printk(KERN_ERR PRINTK_HEADER "......\n"); + dev_err(dev, "......\n"); } - dasd_eckd_dump_ccw_range(from, last, page + len); + dasd_eckd_dump_ccw_range(device, from, last, page + len); } free_page((unsigned long) page); } @@ -5701,11 +5666,9 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, + "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), @@ -5713,58 +5676,46 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, irb->scsw.tm.fcxs, (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, req ? 
req->intrc : 0); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing TCW: %p\n", - dev_name(&device->cdev->dev), - phys_to_virt(irb->scsw.tm.tcw)); + len += sprintf(page + len, "Failing TCW: %px\n", + dma32_to_virt(irb->scsw.tm.tcw)); tsb = NULL; sense = NULL; if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) - tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw)); + tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw)); if (tsb) { - len += sprintf(page + len, PRINTK_HEADER - " tsb->length %d\n", tsb->length); - len += sprintf(page + len, PRINTK_HEADER - " tsb->flags %x\n", tsb->flags); - len += sprintf(page + len, PRINTK_HEADER - " tsb->dcw_offset %d\n", tsb->dcw_offset); - len += sprintf(page + len, PRINTK_HEADER - " tsb->count %d\n", tsb->count); + len += sprintf(page + len, "tsb->length %d\n", tsb->length); + len += sprintf(page + len, "tsb->flags %x\n", tsb->flags); + len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset); + len += sprintf(page + len, "tsb->count %d\n", tsb->count); residual = tsb->count - 28; - len += sprintf(page + len, PRINTK_HEADER - " residual %d\n", residual); + len += sprintf(page + len, "residual %d\n", residual); switch (tsb->flags & 0x07) { case 1: /* tsa_iostat */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n", tsb->tsa.iostat.dev_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.def_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n", tsb->tsa.iostat.def_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.queue_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n", tsb->tsa.iostat.queue_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_busy_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n", tsb->tsa.iostat.dev_busy_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_act_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n", tsb->tsa.iostat.dev_act_time); sense = tsb->tsa.iostat.sense; break; case 2: /* ts_ddpc */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); + len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n", + tsb->tsa.ddpc.rc); for (sl = 0; sl < 2; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.ddpc.rcq %2d-%2d: ", + len += sprintf(page + len, + "tsb->tsa.ddpc.rcq %2d-%2d: ", (8 * sl), ((8 * sl) + 7)); rcq = tsb->tsa.ddpc.rcq; for (sct = 0; sct < 8; sct++) { - len += sprintf(page + len, " %02x", + len += sprintf(page + len, "%02x", rcq[8 * sl + sct]); } len += sprintf(page + len, "\n"); @@ -5772,15 +5723,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, sense = tsb->tsa.ddpc.sense; break; case 3: /* tsa_intrg */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.intrg.: not supported yet\n"); + len += sprintf(page + len, + "tsb->tsa.intrg.: not supported yet\n"); break; } if (sense) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, + "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", @@ -5791,27 +5742,23 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, if (sense[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 24 Byte: %x MSG %x, " - "%s MSGb to SYSOP\n", + sprintf(page + len, 
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", sense[7] >> 4, sense[7] & 0x0f, sense[1] & 0x10 ? "" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 32 Byte: Format: %x " - "Exception class %x\n", + sprintf(page + len, + "32 Byte: Format: %x Exception class %x\n", sense[6] & 0x0f, sense[22] >> 4); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO TSB DATA AVAILABLE\n"); + sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(&device->cdev->dev, "%s", page); free_page((unsigned long) page); } @@ -5820,43 +5767,39 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, { u8 *sense = dasd_get_sense(irb); - if (scsw_is_tm(&irb->scsw)) { - /* - * In some cases the 'File Protected' or 'Incorrect Length' - * error might be expected and log messages shouldn't be written - * then. Check if the according suppress bit is set. - */ - if (sense && (sense[1] & SNS1_FILE_PROTECTED) && - test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) - return; - if (scsw_cstat(&irb->scsw) == 0x40 && - test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) - return; + /* + * In some cases certain errors might be expected and + * log messages shouldn't be written then. + * Check if the according suppress bit is set. + */ + if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && + !(sense[2] & SNS2_ENV_DATA_PRESENT) && + test_bit(DASD_CQR_SUPPRESS_IT, &req->flags)) + return; - dasd_eckd_dump_sense_tcw(device, req, irb); - } else { - /* - * In some cases the 'Command Reject' or 'No Record Found' - * error might be expected and log messages shouldn't be - * written then. Check if the according suppress bit is set. 
- */ - if (sense && sense[0] & SNS0_CMD_REJECT && - test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) - return; + if (sense && sense[0] & SNS0_CMD_REJECT && + test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) + return; - if (sense && sense[1] & SNS1_NO_REC_FOUND && - test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) - return; + if (sense && sense[1] & SNS1_NO_REC_FOUND && + test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) + return; + if (scsw_cstat(&irb->scsw) == 0x40 && + test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) + return; + + if (scsw_is_tm(&irb->scsw)) + dasd_eckd_dump_sense_tcw(device, req, irb); + else dasd_eckd_dump_sense_ccw(device, req, irb); - } } static int dasd_eckd_reload_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; + char print_uid[DASD_UID_STRLEN]; int rc, old_base; - char print_uid[60]; struct dasd_uid uid; unsigned long flags; @@ -5945,7 +5888,7 @@ retry: ccw->count = sizeof(struct dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - message buffer */ message_buf = (struct dasd_rssd_messages *) (prssdp + 1); @@ -5955,7 +5898,7 @@ retry: ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(struct dasd_rssd_messages); ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(message_buf); + ccw->cda = virt_to_dma32(message_buf); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -6036,14 +5979,14 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, ccw->count = sizeof(struct dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(prssdp); + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - query host access */ ccw++; ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(struct dasd_psf_query_host_access); ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)virt_to_phys(host_access); + ccw->cda = virt_to_dma32(host_access); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -6196,6 +6139,7 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid struct dasd_copy_relation *copy; struct dasd_block *block; struct gendisk *gdp; + int rc; copy = device->copy; if (!copy) @@ -6230,6 +6174,13 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid /* swap blocklayer device link */ gdp = block->gdp; dasd_add_link_to_gendisk(gdp, secondary); + rc = device_move(disk_to_dev(gdp), &secondary->cdev->dev, DPM_ORDER_NONE); + if (rc) { + dev_err(&primary->cdev->dev, + "copy_pair_swap: moving blockdevice parent %s->%s failed (%d)\n", + dev_name(&primary->cdev->dev), + dev_name(&secondary->cdev->dev), rc); + } /* re-enable device */ dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC); @@ -6278,14 +6229,14 @@ static int dasd_eckd_query_pprc_status(struct dasd_device *device, ccw->count = sizeof(struct dasd_psf_prssd_data); ccw->flags |= CCW_FLAG_CC; ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)(addr_t)prssdp; + ccw->cda = virt_to_dma32(prssdp); /* Read Subsystem Data - query host access */ ccw++; ccw->cmd_code = DASD_ECKD_CCW_RSSD; ccw->count = sizeof(*pprc_data); ccw->flags |= CCW_FLAG_SLI; - ccw->cda = (__u32)(addr_t)pprc_data; + ccw->cda = virt_to_dma32(pprc_data); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -6379,7 +6330,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, psf_cuir->ssid = device->path[pos].ssid; ccw 
= cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_PSF; - ccw->cda = (__u32)virt_to_phys(psf_cuir); + ccw->cda = virt_to_dma32(psf_cuir); ccw->flags = CCW_FLAG_SLI; ccw->count = sizeof(struct dasd_psf_cuir_response); @@ -6865,17 +6816,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device, dasd_schedule_requeue(device); } -/* - * Initialize block layer request queue. - */ -static void dasd_eckd_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_eckd_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - struct dasd_device *device = block->base; - int max; - - if (device->features & DASD_FEATURE_USERAW) { + if (block->base->features & DASD_FEATURE_USERAW) { /* * the max_blocks value for raw_track access is 256 * it is higher than the native ECKD value because we @@ -6883,19 +6826,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block) * so the max_hw_sectors are * 2048 x 512B = 1024kB = 16 tracks */ - max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; - } else { - max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift; + return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; } - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - blk_queue_dma_alignment(q, PAGE_SIZE - 1); + + return DASD_ECKD_MAX_BLOCKS << block->s2b_shift; } static struct ccw_driver dasd_eckd_driver = { @@ -6927,7 +6861,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, .basic_to_known = dasd_eckd_basic_to_known, - .setup_blk_queue = dasd_eckd_setup_blk_queue, + .max_sectors = dasd_eckd_max_sectors, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index c956de711cf7..b177b7952f2e 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -7,8 +7,7 @@ * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ -#define KMSG_COMPONENT "dasd-eckd" - +#include <linux/export.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> @@ -28,11 +27,6 @@ #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eer):" - /* * SECTION: the internal buffer */ @@ -492,7 +486,7 @@ int dasd_eer_enable(struct dasd_device *device) ccw->cmd_code = DASD_ECKD_CCW_SNSS; ccw->count = SNSS_DATA_SIZE; ccw->flags = 0; - ccw->cda = (__u32)virt_to_phys(cqr->data); + ccw->cda = virt_to_dma32(cqr->data); cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index c07e6e713518..89d7516b9ec8 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -9,8 +9,7 @@ * */ -#define KMSG_COMPONENT "dasd" - +#include <linux/export.h> #include <linux/ctype.h> #include <linux/init.h> @@ -18,9 +17,6 @@ #include <asm/ebcdic.h> #include <linux/uaccess.h> -/* This is ugly... 
*/ -#define PRINTK_HEADER "dasd_erp:" - #include "dasd_int.h" struct dasd_ccw_req * @@ -170,12 +166,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) device = cqr->startdev; if (cqr->intrc == -ETIMEDOUT) { dev_err(&device->cdev->dev, - "A timeout error occurred for cqr %p\n", cqr); + "A timeout error occurred for cqr %px\n", cqr); return; } if (cqr->intrc == -ENOLINK) { dev_err(&device->cdev->dev, - "A transport error occurred for cqr %p\n", cqr); + "A transport error occurred for cqr %px\n", cqr); return; } /* dump sense data */ diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index c06fa2b27120..c2a87201c153 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -5,7 +5,6 @@ * Copyright IBM Corp. 1999, 2009 */ -#define KMSG_COMPONENT "dasd-fba" #include <linux/stddef.h> #include <linux/kernel.h> @@ -25,11 +24,6 @@ #include "dasd_int.h" #include "dasd_fba.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(fba):" - #define FBA_DEFAULT_RETRIES 32 #define DASD_FBA_CCW_WRITE 0x41 @@ -37,6 +31,7 @@ #define DASD_FBA_CCW_LOCATE 0x43 #define DASD_FBA_CCW_DEFINE_EXTENT 0x63 +MODULE_DESCRIPTION("S/390 DASD FBA Disks device driver"); MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; @@ -83,7 +78,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT; ccw->flags = 0; ccw->count = 16; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); memset(data, 0, sizeof (struct DE_fba_data)); if (rw == WRITE) (data->mask).perm = 0x0; @@ -103,7 +98,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, ccw->cmd_code = DASD_FBA_CCW_LOCATE; ccw->flags = 0; ccw->count = 8; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); memset(data, 0, sizeof (struct LO_fba_data)); if (rw == WRITE) data->operation.cmd = 0x5; @@ -262,7 +257,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) ccw->cmd_code = DASD_FBA_CCW_WRITE; ccw->flags |= CCW_FLAG_SLI; ccw->count = count; - ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page); + ccw->cda = virt_to_dma32(dasd_fba_zero_page); } /* @@ -432,7 +427,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( struct request *req) { struct dasd_fba_private *private = block->base->private; - unsigned long *idaws; + dma64_t *idaws; struct LO_fba_data *LO_data; struct dasd_ccw_req *cqr; struct ccw1 *ccw; @@ -492,7 +487,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( define_extent(ccw++, cqr->data, rq_data_dir(req), block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); /* Build locate_record + read/write ccws. */ - idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); + idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data)); LO_data = (struct LO_fba_data *) (idaws + cidaw); /* Locate record for all blocks for smart devices. 
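Retyping the idaws cursor from unsigned long * to dma64_t * makes the entry width of an IDAL explicit; the per-CCW addressing decision itself is unchanged and recurs in both the ECKD and FBA build_cp paths. The pattern, condensed:

    /* dst is the per-segment data buffer, blksize its length */
    if (idal_is_needed(dst, blksize)) {
        ccw->cda = virt_to_dma32(idaws);    /* CCW points at the IDAL...    */
        ccw->flags = CCW_FLAG_IDA;
        idaws = idal_create_words(idaws, dst, blksize); /* ...IDAL at dst   */
    } else {
        ccw->cda = virt_to_dma32(dst);      /* dst is directly addressable  */
        ccw->flags = 0;
    }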
*/ if (private->rdc_data.mode.bits.data_chain != 0) { @@ -528,11 +523,11 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( ccw->cmd_code = cmd; ccw->count = block->bp_block; if (idal_is_needed(dst, blksize)) { - ccw->cda = (__u32)virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags = CCW_FLAG_IDA; idaws = idal_create_words(idaws, dst, blksize); } else { - ccw->cda = (__u32)virt_to_phys(dst); + ccw->cda = virt_to_dma32(dst); ccw->flags = 0; } ccw++; @@ -590,9 +585,9 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) - cda = *((char **)phys_to_virt(ccw->cda)); + cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda))); else - cda = phys_to_virt(ccw->cda); + cda = dma32_to_virt(ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) memcpy(dst, cda, bv.bv_len); @@ -660,30 +655,27 @@ static void dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, struct irb *irb) { - char *page; struct ccw1 *act, *end, *last; int len, sl, sct, count; + struct device *dev; + char *page; + + dev = &device->cdev->dev; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "No memory to dump sense data"); + "No memory to dump sense data"); return; } - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X\n", req, - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing CCW: %p\n", - dev_name(&device->cdev->dev), - (void *) (addr_t) irb->scsw.cmd.cpa); + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n", + req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); + len += sprintf(page + len, "Failing CCW: %px\n", + (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa)); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { @@ -693,47 +685,43 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, len += sprintf(page + len, "\n"); } } else { - len += sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); /* dump the Channel Program */ /* print first CCWs (maximum 8) */ act = req->cpaddr; - for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); + for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); end = min(act + 8, last); - len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req); + len = sprintf(page, "Related CP in req: %px\n", req); while (act <= end) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", - ((int *) (addr_t) act->cda) + ((int *)dma32_to_virt(act->cda)) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } - printk(KERN_ERR "%s", page); - + dev_err(dev, "%s", page); /* print failing CCW area */ len = 0; - if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { - act = ((struct ccw1 
*)(addr_t) irb->scsw.cmd.cpa) - 2; - len += sprintf(page + len, PRINTK_HEADER "......\n"); + if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) { + act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2; + len += sprintf(page + len, "......\n"); } - end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); + end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last); while (act <= end) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", - ((int *) (addr_t) act->cda) + ((int *)dma32_to_virt(act->cda)) [(count>>2)]); len += sprintf(page + len, "\n"); act++; @@ -742,54 +730,27 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, /* print last CCWs */ if (act < last - 2) { act = last - 2; - len += sprintf(page + len, PRINTK_HEADER "......\n"); + len += sprintf(page + len, "......\n"); } while (act <= last) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", - ((int *) (addr_t) act->cda) + ((int *)dma32_to_virt(act->cda)) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } if (len > 0) - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); free_page((unsigned long) page); } -/* - * Initialize block layer request queue. - */ -static void dasd_fba_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_fba_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - unsigned int max_bytes, max_discard_sectors; - int max; - - max = DASD_FBA_MAX_BLOCKS << block->s2b_shift; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - - q->limits.discard_granularity = logical_block_size; - - /* Calculate max_discard_sectors and make it PAGE aligned */ - max_bytes = USHRT_MAX * logical_block_size; - max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); - max_discard_sectors = max_bytes / logical_block_size; - - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); + return DASD_FBA_MAX_BLOCKS << block->s2b_shift; } static int dasd_fba_pe_handler(struct dasd_device *device, @@ -802,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", + .has_discard = true, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, .pe_handler = dasd_fba_pe_handler, - .setup_blk_queue = dasd_fba_setup_blk_queue, + .max_sectors = dasd_fba_max_sectors, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index fe5108a1b332..6ee3d952412e 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -11,8 
+11,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/interrupt.h> #include <linux/major.h> #include <linux/fs.h> @@ -20,13 +18,11 @@ #include <linux/uaccess.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_gendisk:" - #include "dasd_int.h" static unsigned int queue_depth = 32; static unsigned int nr_hw_queues = 4; +static void dasd_gd_free(struct gendisk *gdp); module_param(queue_depth, uint, 0444); MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); @@ -35,30 +31,71 @@ module_param(nr_hw_queues, uint, 0444); MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); /* + * Set device name. + * dasda - dasdz : 26 devices + * dasdaa - dasdzz : 676 devices, added up = 702 + * dasdaaa - dasdzzz : 17576 devices, added up = 18278 + * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 + */ +static int dasd_name_format(char *prefix, int index, char *buf, int buflen) +{ + const int base = 'z' - 'a' + 1; + char *begin = buf + strlen(prefix); + char *end = buf + buflen; + char *p; + int unit; + + p = end - 1; + *p = '\0'; + unit = base; + do { + if (p == begin) + return -EINVAL; + *--p = 'a' + (index % unit); + index = (index / unit) - 1; + } while (index >= 0); + + memmove(begin, p, end - p); + memcpy(buf, prefix, strlen(prefix)); + + return 0; +} + +/* * Allocate and register gendisk structure for device. */ int dasd_gendisk_alloc(struct dasd_block *block) { + struct queue_limits lim = { + /* + * With page sized segments, each segment can be translated into + * one idaw/tidaw. + */ + .max_segment_size = PAGE_SIZE, + .seg_boundary_mask = PAGE_SIZE - 1, + .max_segments = USHRT_MAX, + }; struct gendisk *gdp; struct dasd_device *base; - int len, rc; + unsigned int devindex; + int rc; /* Make sure the minor for this device exists. */ base = block->base; - if (base->devindex >= DASD_PER_MAJOR) + devindex = base->devindex; + if (devindex >= DASD_PER_MAJOR) return -EBUSY; block->tag_set.ops = &dasd_mq_ops; block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); block->tag_set.nr_hw_queues = nr_hw_queues; block->tag_set.queue_depth = queue_depth; - block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; block->tag_set.numa_node = NUMA_NO_NODE; rc = blk_mq_alloc_tag_set(&block->tag_set); if (rc) return rc; - gdp = blk_mq_alloc_disk(&block->tag_set, block); + gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block); if (IS_ERR(gdp)) { blk_mq_free_tag_set(&block->tag_set); return PTR_ERR(gdp); @@ -66,31 +103,17 @@ int dasd_gendisk_alloc(struct dasd_block *block) /* Initialize gendisk structure. */ gdp->major = DASD_MAJOR; - gdp->first_minor = base->devindex << DASD_PARTN_BITS; + gdp->first_minor = devindex << DASD_PARTN_BITS; gdp->minors = 1 << DASD_PARTN_BITS; gdp->fops = &dasd_device_operations; - /* - * Set device name. 
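The nested sprintf() logic removed below is replaced by dasd_name_format(), which builds the suffix back to front in bijective base-26; the "index = (index / unit) - 1" step is what makes "z" roll over to "aa" rather than a leading-digit form. Worked examples, assuming a buffer the size of gendisk's disk_name:

    char name[32];

    dasd_name_format("dasd", 0, name, sizeof(name));    /* "dasda"   */
    dasd_name_format("dasd", 25, name, sizeof(name));   /* "dasdz"   */
    dasd_name_format("dasd", 26, name, sizeof(name));   /* "dasdaa"  */
    dasd_name_format("dasd", 701, name, sizeof(name));  /* "dasdzz"  */
    dasd_name_format("dasd", 702, name, sizeof(name));  /* "dasdaaa" */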
- * dasda - dasdz : 26 devices - * dasdaa - dasdzz : 676 devices, added up = 702 - * dasdaaa - dasdzzz : 17576 devices, added up = 18278 - * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 - */ - len = sprintf(gdp->disk_name, "dasd"); - if (base->devindex > 25) { - if (base->devindex > 701) { - if (base->devindex > 18277) - len += sprintf(gdp->disk_name + len, "%c", - 'a'+(((base->devindex-18278) - /17576)%26)); - len += sprintf(gdp->disk_name + len, "%c", - 'a'+(((base->devindex-702)/676)%26)); - } - len += sprintf(gdp->disk_name + len, "%c", - 'a'+(((base->devindex-26)/26)%26)); + rc = dasd_name_format("dasd", devindex, gdp->disk_name, sizeof(gdp->disk_name)); + if (rc) { + DBF_DEV_EVENT(DBF_ERR, block->base, + "setting disk name failed, rc %d", rc); + dasd_gd_free(gdp); + return rc; } - len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26)); if (base->features & DASD_FEATURE_READONLY || test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) @@ -109,14 +132,22 @@ int dasd_gendisk_alloc(struct dasd_block *block) } /* + * Free gendisk structure + */ +static void dasd_gd_free(struct gendisk *gd) +{ + del_gendisk(gd); + gd->private_data = NULL; + put_disk(gd); +} + +/* * Unregister and free gendisk structure for device. */ void dasd_gendisk_free(struct dasd_block *block) { if (block->gdp) { - del_gendisk(block->gdp); - block->gdp->private_data = NULL; - put_disk(block->gdp); + dasd_gd_free(block->gdp); block->gdp = NULL; blk_mq_free_tag_set(&block->tag_set); } @@ -127,15 +158,15 @@ void dasd_gendisk_free(struct dasd_block *block) */ int dasd_scan_partitions(struct dasd_block *block) { - struct block_device *bdev; + struct file *bdev_file; int rc; - bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL, - NULL); - if (IS_ERR(bdev)) { + bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, + NULL, NULL); + if (IS_ERR(bdev_file)) { DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, blkdev_get returned %ld", - PTR_ERR(bdev)); + PTR_ERR(bdev_file)); return -ENODEV; } @@ -147,16 +178,15 @@ int dasd_scan_partitions(struct dasd_block *block) "scan partitions error, rc %d", rc); /* - * Since the matching blkdev_put call to the blkdev_get in - * this function is not called before dasd_destroy_partitions - * the offline open_count limit needs to be increased from - * 0 to 1. This is done by setting device->bdev (see - * dasd_generic_set_offline). As long as the partition - * detection is running no offline should be allowed. That - * is why the assignment to device->bdev is done AFTER - * the BLKRRPART ioctl. + * Since the matching fput() call to the + * bdev_file_open_by_path() in this function is not called before + * dasd_destroy_partitions the offline open_count limit needs to be + * increased from 0 to 1. This is done by setting device->bdev_file + * (see dasd_generic_set_offline). As long as the partition detection + * is running no offline should be allowed. That is why the assignment + * to block->bdev_file is done AFTER the BLKRRPART ioctl. */ - block->bdev = bdev; + block->bdev_file = bdev_file; return 0; } @@ -166,21 +196,21 @@ int dasd_scan_partitions(struct dasd_block *block) */ void dasd_destroy_partitions(struct dasd_block *block) { - struct block_device *bdev; + struct file *bdev_file; /* - * Get the bdev pointer from the device structure and clear - * device->bdev to lower the offline open_count limit again. 
+ * Get the bdev_file pointer from the device structure and clear + * device->bdev_file to lower the offline open_count limit again. */ - bdev = block->bdev; - block->bdev = NULL; + bdev_file = block->bdev_file; + block->bdev_file = NULL; - mutex_lock(&bdev->bd_disk->open_mutex); - bdev_disk_changed(bdev->bd_disk, true); - mutex_unlock(&bdev->bd_disk->open_mutex); + mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex); + bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true); + mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex); /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ - blkdev_put(bdev, NULL); + fput(bdev_file); } int dasd_gendisk_init(void) diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 0aa56351da72..81cfb5c89681 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -113,9 +113,6 @@ do { \ __dev_id.ssid, __dev_id.devno, d_data); \ } while (0) -/* limit size for an errorstring */ -#define ERRORLENGTH 30 - /* definition of dbf debug levels */ #define DBF_EMERG 0 /* system is unusable */ #define DBF_ALERT 1 /* action must be taken immediately */ @@ -126,32 +123,6 @@ do { \ #define DBF_INFO 6 /* informational */ #define DBF_DEBUG 6 /* debug-level messages */ -/* messages to be written via klogd and dbf */ -#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \ - dev_name(&d_device->cdev->dev), d_args); \ - DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \ -} while(0) - -#define MESSAGE(d_loglevel,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \ - DBF_EVENT(DBF_ALERT, d_string, d_args); \ -} while(0) - -/* messages to be written via klogd only */ -#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \ - dev_name(&d_device->cdev->dev), d_args); \ -} while(0) - -#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \ -} while(0) - /* Macro to calculate number of blocks per page */ #define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize) @@ -225,7 +196,7 @@ struct dasd_ccw_req { * The following flags are used to suppress output of certain errors. 
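The partition-scan handle kept in struct dasd_block changes type from struct block_device * to struct file *, following the block layer's move to file-based opens; getting back to the disk now goes through file_bdev(). The resulting pairing, condensed from dasd_scan_partitions() and dasd_destroy_partitions():

    struct file *bdev_file;

    bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
                                      NULL, NULL);
    if (IS_ERR(bdev_file))
        return PTR_ERR(bdev_file);
    block->bdev_file = bdev_file;  /* held open while partitions are in use */

    /* ...and the matching teardown: */
    mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex);
    bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true);
    mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex);
    fput(bdev_file);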
*/ #define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */ -#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/ +#define DASD_CQR_SUPPRESS_IT 5 /* Suppress 'Invalid Track' error*/ #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ @@ -259,6 +230,10 @@ struct dasd_uid { char vduit[33]; }; +#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \ + /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \ + /* vduit */ 32 + 1) + /* * PPRC Status data */ @@ -279,7 +254,7 @@ struct dasd_pprc_dev_info { __u8 secondary; /* 7 Secondary device address */ __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */ __u8 reserved2[12]; /* 10-21 reserved */ - __u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */ + __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */ __u8 reserved3[12]; /* 24-35 reserved */ __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */ __u8 reserved4[90]; /* 38-127 reserved */ @@ -318,6 +293,7 @@ struct dasd_discipline { struct module *owner; char ebcname[8]; /* a name used for tagging and printks */ char name[8]; /* a name used for tagging and printks */ + bool has_discard; struct list_head list; /* used for list of disciplines */ @@ -356,10 +332,7 @@ struct dasd_discipline { int (*online_to_ready) (struct dasd_device *); int (*basic_to_known)(struct dasd_device *); - /* - * Initialize block layer request queue. - */ - void (*setup_blk_queue)(struct dasd_block *); + unsigned int (*max_sectors)(struct dasd_block *); /* (struct dasd_device *); * Device operation functions. build_cp creates a ccw chain for * a block device request, start_io starts the request and @@ -646,7 +619,7 @@ struct dasd_block { struct gendisk *gdp; spinlock_t request_queue_lock; struct blk_mq_tag_set tag_set; - struct block_device *bdev; + struct file *bdev_file; atomic_t open_count; unsigned long blocks; /* size of volume in blocks */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index d55862605b82..f883990be626 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -10,10 +10,8 @@ * i/o controls for the dasd driver. */ -#define KMSG_COMPONENT "dasd" - #include <linux/interrupt.h> -#include <linux/compat.h> +#include <linux/export.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> @@ -24,12 +22,8 @@ #include <linux/uaccess.h> #include <linux/dasd_mod.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_ioctl:" - #include "dasd_int.h" - static int dasd_ioctl_api_version(void __user *argp) { @@ -221,7 +215,7 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata) * enabling the device later. */ if (fdata->start_unit == 0) { - block->gdp->part0->bd_inode->i_blkbits = + block->gdp->part0->bd_mapping->host->i_blkbits = blksize_bits(fdata->blksize); } @@ -537,7 +531,7 @@ static int __dasd_ioctl_information(struct dasd_block *block, * This must be hidden from user-space. 
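DASD_UID_STRLEN replaces a magic buffer size: summing the printed fields plus one separator or terminator byte each gives (3+1) + (14+1) + (4+1) + (2+1) + (32+1) = 60, which matches the open-coded sixty-byte array that the dasd_eckd_reload_device() hunk above retires:

    char print_uid[DASD_UID_STRLEN];    /* was: char print_uid[60] */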
*/ dasd_info->open_count = atomic_read(&block->open_count); - if (!block->bdev) + if (!block->bdev_file) dasd_info->open_count++; /* @@ -621,10 +615,7 @@ int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, void __user *argp; int rc; - if (is_compat_task()) - argp = compat_ptr(arg); - else - argp = (void __user *)arg; + argp = (void __user *)arg; if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) return -EINVAL; diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 62a859ea67f8..48e12e81df00 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -11,8 +11,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/ctype.h> #include <linux/slab.h> #include <linux/string.h> @@ -23,9 +21,6 @@ #include <asm/debug.h> #include <linux/uaccess.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_proc:" - #include "dasd_int.h" static struct proc_dir_entry *dasd_proc_root_entry = NULL; @@ -355,6 +350,7 @@ dasd_proc_init(void) remove_proc_entry("devices", dasd_proc_root_entry); out_nodevices: remove_proc_entry("dasd", NULL); + dasd_proc_root_entry = NULL; out_nodasd: return -ENOENT; } @@ -362,7 +358,11 @@ dasd_proc_init(void) void dasd_proc_exit(void) { + if (!dasd_proc_root_entry) + return; + remove_proc_entry("devices", dasd_proc_root_entry); remove_proc_entry("statistics", dasd_proc_root_entry); remove_proc_entry("dasd", NULL); + dasd_proc_root_entry = NULL; } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 06bcb6c78909..38e1df8f8a82 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -5,8 +5,7 @@ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer */ -#define KMSG_COMPONENT "dcssblk" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "dcssblk: " fmt #include <linux/module.h> #include <linux/moduleparam.h> @@ -17,7 +16,6 @@ #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/interrupt.h> -#include <linux/pfn_t.h> #include <linux/uio.h> #include <linux/dax.h> #include <linux/io.h> @@ -33,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk); static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, - pfn_t *pfn); + unsigned long *pfn); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -80,6 +78,8 @@ struct dcssblk_dev_info { int num_of_segments; struct list_head seg_list; struct dax_device *dax_dev; + struct dev_pagemap pgmap; + void *pgmap_addr; }; struct segment_info { @@ -314,7 +314,7 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info) if (*seg_info == NULL) return -ENOMEM; - strcpy((*seg_info)->segment_name, name); + strscpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, @@ -339,7 +339,7 @@ dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); - return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n"); + return sysfs_emit(buf, dev_info->is_shared ? 
"1\n" : "0\n"); } static ssize_t @@ -411,13 +411,15 @@ removeseg: segment_unload(entry->segment_name); } list_del(&dev_info->lh); + up_write(&dcssblk_devices_sem); dax_remove_host(dev_info->gd); kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); + if (dev_info->pgmap_addr) + devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap); del_gendisk(dev_info->gd); put_disk(dev_info->gd); - up_write(&dcssblk_devices_sem); if (device_remove_file_self(dev, attr)) { device_unregister(dev); @@ -444,7 +446,7 @@ dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); - return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n"); + return sysfs_emit(buf, dev_info->save_pending ? "1\n" : "0\n"); } static ssize_t @@ -506,21 +508,15 @@ static ssize_t dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; - struct dcssblk_dev_info *dev_info; struct segment_info *entry; + int i; + i = 0; down_read(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); - i = 0; - buf[0] = '\0'; - list_for_each_entry(entry, &dev_info->seg_list, lh) { - strcpy(&buf[i], entry->segment_name); - i += strlen(entry->segment_name); - buf[i] = '\n'; - i++; - } + list_for_each_entry(entry, &dev_info->seg_list, lh) + i += sysfs_emit_at(buf, i, "%s\n", entry->segment_name); up_read(&dcssblk_devices_sem); return i; } @@ -540,16 +536,33 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = { NULL, }; +static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info) +{ + struct dax_device *dax_dev; + + dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); + if (IS_ERR(dax_dev)) + return PTR_ERR(dax_dev); + set_dax_synchronous(dax_dev); + dev_info->dax_dev = dax_dev; + return dax_add_host(dev_info->dax_dev, dev_info->gd); +} + /* * device attribute for adding devices */ static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct queue_limits lim = { + .logical_block_size = 4096, + .features = BLK_FEAT_DAX, + }; int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; char *local_buf; + void *addr; unsigned long seg_byte_size; dev_info = NULL; @@ -599,7 +612,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = -ENOMEM; goto out; } - strcpy(dev_info->segment_name, local_buf); + strscpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } @@ -629,9 +642,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->dev.release = dcssblk_release_segment; dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); - dev_info->gd = blk_alloc_disk(NUMA_NO_NODE); - if (dev_info->gd == NULL) { - rc = -ENOMEM; + dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(dev_info->gd)) { + rc = PTR_ERR(dev_info->gd); goto seg_list_del; } dev_info->gd->major = dcssblk_major; @@ -639,8 +652,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->fops = &dcssblk_devops; dev_info->gd->private_data = dev_info; dev_info->gd->flags |= GENHD_FL_NO_PART; - blk_queue_logical_block_size(dev_info->gd->queue, 4096); - blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, 
seg_byte_size >> 9); // size in sectors @@ -662,8 +673,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = dcssblk_assign_free_minor(dev_info); if (rc) goto release_gd; - sprintf(dev_info->gd->disk_name, "dcssblk%d", - dev_info->gd->first_minor); + scnprintf(dev_info->gd->disk_name, sizeof(dev_info->gd->disk_name), + "dcssblk%d", dev_info->gd->first_minor); list_add_tail(&dev_info->lh, &dcssblk_devices); if (!try_module_get(THIS_MODULE)) { @@ -677,16 +688,26 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto put_dev; - dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); - if (IS_ERR(dev_info->dax_dev)) { - rc = PTR_ERR(dev_info->dax_dev); - dev_info->dax_dev = NULL; - goto put_dev; + if (!IS_ALIGNED(dev_info->start, SUBSECTION_SIZE) || + !IS_ALIGNED(dev_info->end + 1, SUBSECTION_SIZE)) { + pr_info("DCSS %s is not aligned to %lu bytes, DAX support disabled\n", + local_buf, SUBSECTION_SIZE); + } else { + dev_info->pgmap.type = MEMORY_DEVICE_FS_DAX; + dev_info->pgmap.range.start = dev_info->start; + dev_info->pgmap.range.end = dev_info->end; + dev_info->pgmap.nr_range = 1; + addr = devm_memremap_pages(&dev_info->dev, &dev_info->pgmap); + if (IS_ERR(addr)) { + rc = PTR_ERR(addr); + goto put_dev; + } + dev_info->pgmap_addr = addr; + rc = dcssblk_setup_dax(dev_info); + if (rc) + goto out_dax; + pr_info("DAX support enabled for DCSS %s\n", local_buf); } - set_dax_synchronous(dev_info->dax_dev); - rc = dax_add_host(dev_info->dax_dev, dev_info->gd); - if (rc) - goto out_dax; get_device(&dev_info->dev); rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL); @@ -713,6 +734,8 @@ out_dax_host: out_dax: kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); + if (dev_info->pgmap_addr) + devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap); put_dev: list_del(&dev_info->lh); put_disk(dev_info->gd); @@ -790,18 +813,19 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch } list_del(&dev_info->lh); + /* unload all related segments */ + list_for_each_entry(entry, &dev_info->seg_list, lh) + segment_unload(entry->segment_name); + up_write(&dcssblk_devices_sem); + dax_remove_host(dev_info->gd); kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); + if (dev_info->pgmap_addr) + devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap); del_gendisk(dev_info->gd); put_disk(dev_info->gd); - /* unload all related segments */ - list_for_each_entry(entry, &dev_info->seg_list, lh) - segment_unload(entry->segment_name); - - up_write(&dcssblk_devices_sem); - device_unregister(&dev_info->dev); put_device(&dev_info->dev); @@ -911,17 +935,16 @@ fail: static long __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, void **kaddr, unsigned long *pfn) { resource_size_t offset = pgoff * PAGE_SIZE; unsigned long dev_sz; dev_sz = dev_info->end - dev_info->start + 1; if (kaddr) - *kaddr = (void *) dev_info->start + offset; + *kaddr = __va(dev_info->start + offset); if (pfn) - *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), - PFN_DEV|PFN_SPECIAL); + *pfn = PFN_DOWN(dev_info->start + offset); return (dev_sz - offset) / PAGE_SIZE; } @@ -929,7 +952,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, - pfn_t *pfn) + unsigned long *pfn) { struct dcssblk_dev_info 
*dev_info = dax_get_private(dax_dev); @@ -1030,4 +1053,5 @@ MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " "the contiguous segments - \n" "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); +MODULE_DESCRIPTION("S/390 block driver for DCSS memory"); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 3a9cc8a4a230..04e84f45dcc9 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -6,8 +6,7 @@ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> */ -#define KMSG_COMPONENT "scm_block" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "scm_block: " fmt #include <linux/interrupt.h> #include <linux/spinlock.h> @@ -17,6 +16,7 @@ #include <linux/blk-mq.h> #include <linux/slab.h> #include <linux/list.h> +#include <linux/io.h> #include <asm/eadm.h> #include "scm_blk.h" @@ -130,7 +130,7 @@ static void scm_request_done(struct scm_request *scmrq) for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { msb = &scmrq->aob->msb[i]; - aidaw = msb->data_addr; + aidaw = (u64)dma64_to_virt(msb->data_addr); if ((msb->flags & MSB_FLAG_IDA) && aidaw && IS_ALIGNED(aidaw, PAGE_SIZE)) @@ -195,12 +195,12 @@ static int scm_request_prepare(struct scm_request *scmrq) msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE; msb->flags |= MSB_FLAG_IDA; - msb->data_addr = (u64) aidaw; + msb->data_addr = virt_to_dma64(aidaw); rq_for_each_segment(bv, req, iter) { WARN_ON(bv.bv_offset); msb->blk_count += bv.bv_len >> 12; - aidaw->data_addr = (u64) page_address(bv.bv_page); + aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page)); aidaw++; } @@ -434,10 +434,16 @@ static const struct blk_mq_ops scm_mq_ops = { int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { - unsigned int devindex, nr_max_blk; - struct request_queue *rq; + struct queue_limits lim = { + .logical_block_size = 1 << 12, + }; + unsigned int devindex; int len, ret; + lim.max_segments = min(scmdev->nr_max_block, + (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); + lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */ + devindex = atomic_inc_return(&nr_devices) - 1; /* scma..scmz + scmaa..scmzz */ if (devindex > 701) { @@ -454,28 +460,17 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) bdev->tag_set.cmd_size = sizeof(blk_status_t); bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; - bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; bdev->tag_set.numa_node = NUMA_NO_NODE; ret = blk_mq_alloc_tag_set(&bdev->tag_set); if (ret) goto out; - bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev); + bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev); if (IS_ERR(bdev->gendisk)) { ret = PTR_ERR(bdev->gendisk); goto out_tag; } - rq = bdev->rq = bdev->gendisk->queue; - nr_max_blk = min(scmdev->nr_max_block, - (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); - - blk_queue_logical_block_size(rq, 1 << 12); - blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ - blk_queue_max_segments(rq, nr_max_blk); - blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); - bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; bdev->gendisk->major = scm_major; diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 69a845eb8b1f..6cffbbe83f89 100644 
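scm_blk's queue setup likewise moves to a queue_limits structure passed at allocation time. The arithmetic is worth spelling out: the device works on 4 KiB blocks (logical_block_size = 1 << 12), each block is described by one aidaw, and one 4 KiB block covers eight 512-byte sectors, hence max_hw_sectors = max_segments << 3:

    struct queue_limits lim = {
        .logical_block_size = 1 << 12,          /* 4 KiB device blocks */
    };

    lim.max_segments = min(scmdev->nr_max_block,
                           (unsigned int)(PAGE_SIZE / sizeof(struct aidaw)));
    lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = block size */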
--- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c @@ -6,8 +6,7 @@ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> */ -#define KMSG_COMPONENT "scm_block" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "scm_block: " fmt #include <linux/module.h> #include <linux/slab.h> diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 8a03af5ee5b3..80c4e5101c97 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -96,7 +96,7 @@ config SCLP_OFB config S390_UV_UAPI def_tristate m prompt "Ultravisor userspace API" - depends on S390 && (KVM || PROTECTED_VIRTUALIZATION_GUEST) + depends on S390 help Selecting exposes parts of the UV interface to userspace by providing a misc character device at /dev/uv. diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index b0f6b3201636..dcbd51152ee3 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -21,6 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ sclp_early.o sclp_early_core.o sclp_sd.o +obj-$(CONFIG_MEMORY_HOTPLUG) += sclp_mem.o obj-$(CONFIG_TN3270) += raw3270.o con3270.o obj-$(CONFIG_TN3270_FS) += fs3270.o @@ -32,7 +33,7 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o obj-$(CONFIG_PCI) += sclp_pci.o -obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o +obj-$(subst m,y,$(CONFIG_AP)) += sclp_ap.o obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o obj-$(CONFIG_VMCP) += vmcp.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index a1fef666c9b0..56e43d43c713 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -23,6 +23,7 @@ #include <linux/reboot.h> #include <linux/serial.h> /* ASYNC_* flags */ #include <linux/slab.h> +#include <asm/machine.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <linux/io.h> @@ -79,8 +80,8 @@ struct raw3215_info { struct ccw_device *cdev; /* device for tty driver */ spinlock_t *lock; /* pointer to irq lock */ int flags; /* state flags */ - char *buffer; /* pointer to output buffer */ - char *inbuf; /* pointer to input buffer */ + u8 *buffer; /* pointer to output buffer */ + u8 *inbuf; /* pointer to input buffer */ int head; /* first free byte in output buffer */ int count; /* number of bytes in output buffer */ int written; /* number of bytes in write requests */ @@ -89,7 +90,6 @@ struct raw3215_info { wait_queue_head_t empty_wait; /* wait queue for flushing */ struct timer_list timer; /* timer for delayed output */ int line_pos; /* position on the line (for tabs) */ - char ubuffer[80]; /* copy_from_user buffer */ }; /* array of 3215 devices structures */ @@ -160,7 +160,7 @@ static void raw3215_mk_read_req(struct raw3215_info *raw) ccw->cmd_code = 0x0A; /* read inquiry */ ccw->flags = 0x20; /* ignore incorrect length */ ccw->count = 160; - ccw->cda = (__u32)__pa(raw->inbuf); + ccw->cda = virt_to_dma32(raw->inbuf); } /* @@ -219,7 +219,7 @@ static void raw3215_mk_write_req(struct raw3215_info *raw) ccw[-1].flags |= 0x40; /* use command chaining */ ccw->cmd_code = 0x01; /* write, auto carrier return */ ccw->flags = 0x20; /* ignore incorrect length ind. 
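The raw3215 CCW setup above switches from open-coded (__u32)__pa(...) casts to virt_to_dma32(). The helper's return type is a __bitwise DMA address rather than a bare integer, so sparse now flags any assignment that bypasses the conversion, for example stuffing a 64-bit virtual address straight into a 31-bit CDA field. A simplified illustration of the idea, not the real asm/dma-types.h definitions:

#include <linux/io.h>		/* virt_to_phys() */
#include <linux/types.h>

typedef u32 __bitwise example_dma32_t;	/* address as the channel subsystem sees it */

static inline example_dma32_t example_virt_to_dma32(void *ptr)
{
	/* assumes the buffer was allocated below 2 GiB, e.g. with GFP_DMA */
	return (__force example_dma32_t)(u32)virt_to_phys(ptr);
}

With a typed field, an unconverted integer assignment becomes a sparse warning, so address-width mistakes in channel programs are caught at build time instead of at run time.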
*/ - ccw->cda = (__u32)__pa(raw->buffer + ix); + ccw->cda = virt_to_dma32(raw->buffer + ix); count = len; if (ix + count > RAW3215_BUFFER_SIZE) count = RAW3215_BUFFER_SIZE - ix; @@ -284,7 +284,7 @@ static void raw3215_start_io(struct raw3215_info *raw) */ static void raw3215_timeout(struct timer_list *t) { - struct raw3215_info *raw = from_timer(raw, t, timer); + struct raw3215_info *raw = timer_container_of(raw, t, timer); unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); @@ -523,12 +523,14 @@ static unsigned int raw3215_make_room(struct raw3215_info *raw, * string without blocking. * Return value is the number of bytes copied. */ -static unsigned int raw3215_addtext(const char *str, unsigned int length, +static unsigned int raw3215_addtext(const u8 *str, size_t length, struct raw3215_info *raw, int opmode, unsigned int todrop) { - unsigned int c, ch, i, blanks, expanded_size = 0; + unsigned int i, blanks, expanded_size = 0; unsigned int column = raw->line_pos; + size_t c; + u8 ch; if (opmode == RAW3215_COUNT) todrop = 0; @@ -559,7 +561,7 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length, if (todrop && expanded_size < todrop) /* Drop head data */ continue; for (i = 0; i < blanks; i++) { - raw->buffer[raw->head] = (char)_ascebc[(int)ch]; + raw->buffer[raw->head] = _ascebc[ch]; raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1); raw->count++; } @@ -571,8 +573,8 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length, /* * String write routine for 3215 devices */ -static void raw3215_write(struct raw3215_info *raw, const char *str, - unsigned int length) +static void raw3215_write(struct raw3215_info *raw, const u8 *str, + size_t length) { unsigned int count, avail; unsigned long flags; @@ -597,7 +599,7 @@ static void raw3215_write(struct raw3215_info *raw, const char *str, /* * Put character routine for 3215 devices */ -static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch) +static void raw3215_putchar(struct raw3215_info *raw, u8 ch) { raw3215_write(raw, &ch, 1); } @@ -802,7 +804,6 @@ static struct attribute *con3215_drv_attrs[] = { static struct attribute_group con3215_drv_attr_group = { .attrs = con3215_drv_attrs, - NULL, }; static const struct attribute_group *con3215_drv_attr_groups[] = { @@ -824,12 +825,10 @@ static struct ccw_driver raw3215_ccw_driver = { .int_class = IRQIO_C15, }; -static void handle_write(struct raw3215_info *raw, const char *str, int count) +static void handle_write(struct raw3215_info *raw, const u8 *str, size_t count) { - int i; - while (count > 0) { - i = min_t(int, count, RAW3215_BUFFER_SIZE - 1); + size_t i = min_t(size_t, count, RAW3215_BUFFER_SIZE - 1); raw3215_write(raw, str, i); count -= i; str += i; @@ -909,7 +908,7 @@ static int __init con3215_init(void) return -ENODEV; /* Set the console mode for VM */ - if (MACHINE_IS_VM) { + if (machine_is_vm()) { cpcmd("TERM CONMODE 3215", NULL, 0, NULL); cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } @@ -1021,8 +1020,8 @@ static unsigned int tty3215_write_room(struct tty_struct *tty) /* * String write routine for 3215 ttys */ -static int tty3215_write(struct tty_struct *tty, - const unsigned char *buf, int count) +static ssize_t tty3215_write(struct tty_struct *tty, const u8 *buf, + size_t count) { handle_write(tty->driver_data, buf, count); return count; @@ -1031,7 +1030,7 @@ static int tty3215_write(struct tty_struct *tty, /* * Put character routine for 3215 ttys */ -static int tty3215_put_char(struct tty_struct *tty, 
unsigned char ch) +static int tty3215_put_char(struct tty_struct *tty, u8 ch) { struct raw3215_info *raw = tty->driver_data; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index d9983550062d..4a7c084e68a6 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -21,8 +21,8 @@ #include <linux/reboot.h> #include <linux/slab.h> #include <linux/memblock.h> -#include <linux/compat.h> +#include <asm/machine.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/ebcdic.h> @@ -54,7 +54,7 @@ struct tty3270_attribute { }; struct tty3270_cell { - unsigned char character; + u8 character; struct tty3270_attribute attributes; }; @@ -101,6 +101,7 @@ struct tty3270 { /* Input stuff. */ char *prompt; /* Output string for input area. */ + size_t prompt_sz; /* Size of output string. */ char *input; /* Input string for read request. */ struct raw3270_request *read; /* Single read request. */ struct raw3270_request *kreset; /* Single keyboard reset request. */ @@ -123,7 +124,7 @@ struct tty3270 { /* Character array for put_char/flush_chars. */ unsigned int char_count; - char char_buf[TTY3270_CHAR_BUF_SIZE]; + u8 char_buf[TTY3270_CHAR_BUF_SIZE]; }; /* tty3270->update_flags. See tty3270_update for details. */ @@ -205,7 +206,7 @@ static int tty3270_input_size(int cols) static void tty3270_update_prompt(struct tty3270 *tp, char *input) { - strcpy(tp->prompt, input); + strscpy(tp->prompt, input, tp->prompt_sz); tp->update_flags |= TTY_UPDATE_INPUT; tty3270_set_timer(tp, 1); } @@ -523,12 +524,12 @@ static void tty3270_update_lines_all(struct tty3270 *tp, struct raw3270_request */ static void tty3270_update(struct timer_list *t) { - struct tty3270 *tp = from_timer(tp, t, timer); + struct tty3270 *tp = timer_container_of(tp, t, timer); struct raw3270_request *wrq; u8 cmd = TC_WRITE; int rc, len; - wrq = xchg(&tp->write, 0); + wrq = xchg(&tp->write, NULL); if (!wrq) { tty3270_set_timer(tp, 1); return; @@ -746,7 +747,7 @@ static void tty3270_issue_read(struct tty3270 *tp, int lock) struct raw3270_request *rrq; int rc; - rrq = xchg(&tp->read, 0); + rrq = xchg(&tp->read, NULL); if (!rrq) /* Read already scheduled. 
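Swapping the literal 0 for NULL in the xchg() calls above silences sparse's "using plain integer as NULL pointer" warning; the surrounding pattern is the interesting part: xchg() atomically empties the single-request slot and returns the old value, so even if the update timer and the interrupt path race, exactly one of them obtains the request. Sketched with hypothetical names:

#include <linux/atomic.h>

/* Claim a single-use request; whoever sees non-NULL owns it. */
static struct raw3270_request *claim_request(struct raw3270_request **slot)
{
	return xchg(slot, NULL);	/* returns old value, leaves slot empty */
}

A NULL return means the request is already in flight and the caller simply backs off, which is what tty3270_update() and tty3270_issue_read() do.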
*/ return; @@ -792,7 +793,7 @@ static void tty3270_deactivate(struct raw3270_view *view) { struct tty3270 *tp = container_of(view, struct tty3270, view); - del_timer(&tp->timer); + timer_delete(&tp->timer); } static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) @@ -968,8 +969,7 @@ static void tty3270_resize(struct raw3270_view *view, char **old_rcl_lines, **new_rcl_lines; char *old_prompt, *new_prompt; char *old_input, *new_input; - struct tty_struct *tty; - struct winsize ws; + size_t prompt_sz; int new_allocated, old_allocated = tp->allocated_lines; if (old_model == new_model && @@ -981,10 +981,11 @@ static void tty3270_resize(struct raw3270_view *view, return; } - new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(new_cols); + new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!new_input) return; - new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL); + new_prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!new_prompt) goto out_input; screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated); @@ -1009,6 +1010,7 @@ static void tty3270_resize(struct raw3270_view *view, old_rcl_lines = tp->rcl_lines; tp->input = new_input; tp->prompt = new_prompt; + tp->prompt_sz = prompt_sz; tp->rcl_lines = new_rcl_lines; tp->rcl_read_index = 0; tp->rcl_write_index = 0; @@ -1018,14 +1020,14 @@ static void tty3270_resize(struct raw3270_view *view, kfree(old_prompt); tty3270_free_recall(old_rcl_lines); tty3270_set_timer(tp, 1); - /* Informat tty layer about new size */ - tty = tty_port_tty_get(&tp->port); - if (!tty) - return; - ws.ws_row = tty3270_tty_rows(tp); - ws.ws_col = tp->view.cols; - tty_do_resize(tty, &ws); - tty_kref_put(tty); + /* Inform the tty layer about new size */ + scoped_guard(tty_port_tty, &tp->port) { + struct winsize ws = { + .ws_row = tty3270_tty_rows(tp), + .ws_col = tp->view.cols, + }; + tty_do_resize(scoped_tty(), &ws); + } return; out_screen: tty3270_free_screen(screen, new_rows); @@ -1059,7 +1061,7 @@ static void tty3270_free(struct raw3270_view *view) { struct tty3270 *tp = container_of(view, struct tty3270, view); - del_timer_sync(&tp->timer); + timer_delete_sync(&tp->timer); tty3270_free_screen(tp->screen, tp->allocated_lines); free_page((unsigned long)tp->converted_line); kfree(tp->input); @@ -1095,6 +1097,7 @@ static int tty3270_create_view(int index, struct tty3270 **newtp) { struct tty3270 *tp; + size_t prompt_sz; int rc; if (tty3270_max_index < index + 1) @@ -1124,17 +1127,19 @@ tty3270_create_view(int index, struct tty3270 **newtp) goto out_free_screen; } - tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(tp->view.cols); + tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!tp->input) { rc = -ENOMEM; goto out_free_converted_line; } - tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL); + tp->prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!tp->prompt) { rc = -ENOMEM; goto out_free_input; } + tp->prompt_sz = prompt_sz; tp->rcl_lines = tty3270_alloc_recall(tp->view.cols); if (!tp->rcl_lines) { @@ -1255,7 +1260,7 @@ static unsigned int tty3270_write_room(struct tty_struct *tty) * Insert character into the screen at the current position with the * current color and highlight. This function does NOT do cursor movement. 
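The from_timer() to timer_container_of() and del_timer*() to timer_delete*() conversions in these tty3270 hunks are mechanical renames from the timer API cleanup; semantics are unchanged, and the _sync variant still waits for a callback that is currently executing. The usage pattern as a self-contained sketch with a hypothetical structure:

#include <linux/timer.h>

struct sample {				/* hypothetical timer owner */
	struct timer_list timer;
	int pending;
};

static void sample_timeout(struct timer_list *t)
{
	/* recover the enclosing object from the timer_list pointer */
	struct sample *s = timer_container_of(s, t, timer);

	s->pending = 0;
}

static void sample_init(struct sample *s)
{
	s->pending = 0;
	timer_setup(&s->timer, sample_timeout, 0);
}

static void sample_teardown(struct sample *s)
{
	timer_delete_sync(&s->timer);	/* blocks until a running callback finishes */
}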
*/ -static void tty3270_put_character(struct tty3270 *tp, char ch) +static void tty3270_put_character(struct tty3270 *tp, u8 ch) { struct tty3270_line *line; struct tty3270_cell *cell; @@ -1561,7 +1566,7 @@ static void tty3270_goto_xy(struct tty3270 *tp, int cx, int cy) * Pn is a numeric parameter, a string of zero or more decimal digits. * Ps is a selective parameter. */ -static void tty3270_escape_sequence(struct tty3270 *tp, char ch) +static void tty3270_escape_sequence(struct tty3270 *tp, u8 ch) { enum { ES_NORMAL, ES_ESC, ES_SQUARE, ES_PAREN, ES_GETPARS }; @@ -1656,7 +1661,7 @@ static void tty3270_escape_sequence(struct tty3270 *tp, char ch) else if (tp->esc_par[0] == 6) { /* Cursor report. */ char buf[40]; - sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1); + scnprintf(buf, sizeof(buf), "\033[%d;%dR", tp->cy + 1, tp->cx + 1); kbd_puts_queue(&tp->port, buf); } return; @@ -1726,7 +1731,7 @@ static void tty3270_escape_sequence(struct tty3270 *tp, char ch) * String write routine for 3270 ttys */ static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty, - const unsigned char *buf, int count) + const u8 *buf, size_t count) { int i_msg, i; @@ -1803,8 +1808,8 @@ static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty, /* * String write routine for 3270 ttys */ -static int tty3270_write(struct tty_struct *tty, - const unsigned char *buf, int count) +static ssize_t tty3270_write(struct tty_struct *tty, const u8 *buf, + size_t count) { struct tty3270 *tp; @@ -1822,7 +1827,7 @@ static int tty3270_write(struct tty_struct *tty, /* * Put single characters to the ttys character buffer */ -static int tty3270_put_char(struct tty_struct *tty, unsigned char ch) +static int tty3270_put_char(struct tty_struct *tty, u8 ch) { struct tty3270 *tp; @@ -1941,21 +1946,6 @@ static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd, return kbd_ioctl(tp->kbd, cmd, arg); } -#ifdef CONFIG_COMPAT -static long tty3270_compat_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg) -{ - struct tty3270 *tp; - - tp = tty->driver_data; - if (!tp) - return -ENODEV; - if (tty_io_error(tty)) - return -EIO; - return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg)); -} -#endif - static const struct tty_operations tty3270_ops = { .install = tty3270_install, .cleanup = tty3270_cleanup, @@ -1970,9 +1960,6 @@ static const struct tty_operations tty3270_ops = { .hangup = tty3270_hangup, .wait_until_sent = tty3270_wait_until_sent, .ioctl = tty3270_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = tty3270_compat_ioctl, -#endif .set_termios = tty3270_set_termios }; @@ -2052,7 +2039,7 @@ con3270_write(struct console *co, const char *str, unsigned int count) { struct tty3270 *tp = co->data; unsigned long flags; - char c; + u8 c; spin_lock_irqsave(&tp->view.lock, flags); while (count--) { @@ -2156,7 +2143,7 @@ con3270_init(void) return -ENODEV; /* Set the console mode for VM */ - if (MACHINE_IS_VM) { + if (machine_is_vm()) { cpcmd("TERM CONMODE 3270", NULL, 0, NULL); cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } @@ -2185,6 +2172,7 @@ con3270_init(void) console_initcall(con3270_init); #endif +MODULE_DESCRIPTION("IBM/3270 Driver - tty functions"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR); diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c index 65c7f2d565d8..a1e110c96f74 100644 --- a/drivers/s390/char/diag_ftp.c +++ b/drivers/s390/char/diag_ftp.c @@ -7,8 +7,7 @@ * */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": 
" fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/mm.h> @@ -16,7 +15,7 @@ #include <linux/wait.h> #include <linux/string.h> #include <asm/asm-extable.h> -#include <asm/ctl_reg.h> +#include <asm/ctlreg.h> #include <asm/diag.h> #include "hmcdrv_ftp.h" @@ -106,7 +105,7 @@ static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl, int rc; diag_stat_inc(DIAG_STAT_X2C4); - asm volatile( + asm_inline volatile( " diag %[addr],%[cmd],0x2c4\n" "0: j 2f\n" "1: la %[rc],%[err]\n" @@ -159,7 +158,7 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize) goto out; } - len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident)); + len = strscpy(ldfpl->fident, ftp->fname); if (len < 0) { len = -EINVAL; goto out_free; diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 4f26b0a55620..73555dbe30d0 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -12,7 +12,6 @@ #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/compat.h> #include <linux/sched/signal.h> #include <linux/module.h> #include <linux/list.h> @@ -126,7 +125,7 @@ static int fs3270_activate(struct raw3270_view *view) raw3270_request_set_cmd(fp->init, TC_EWRITEA); raw3270_request_set_idal(fp->init, fp->rdbuf); fp->init->rescnt = 0; - cp = fp->rdbuf->data[0]; + cp = dma64_to_virt(fp->rdbuf->data[0]); if (fp->rdbuf_size == 0) { /* No saved buffer. Just clear the screen. */ fp->init->ccw.count = 1; @@ -164,7 +163,7 @@ static void fs3270_save_callback(struct raw3270_request *rq, void *data) fp = (struct fs3270 *)rq->view; /* Correct idal buffer element 0 address. */ - fp->rdbuf->data[0] -= 5; + fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], -5); fp->rdbuf->size += 5; /* @@ -202,7 +201,7 @@ static void fs3270_deactivate(struct raw3270_view *view) * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence * in the activation command. 
*/ - fp->rdbuf->data[0] += 5; + fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], 5); fp->rdbuf->size -= 5; raw3270_request_set_idal(fp->init, fp->rdbuf); fp->init->rescnt = 0; @@ -330,10 +329,7 @@ static long fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) fp = filp->private_data; if (!fp) return -ENODEV; - if (is_compat_task()) - argp = compat_ptr(arg); - else - argp = (char __user *)arg; + argp = (char __user *)arg; rc = 0; mutex_lock(&fs3270_mutex); switch (cmd) { @@ -512,22 +508,20 @@ static const struct file_operations fs3270_fops = { .read = fs3270_read, /* read */ .write = fs3270_write, /* write */ .unlocked_ioctl = fs3270_ioctl, /* ioctl */ - .compat_ioctl = fs3270_ioctl, /* ioctl */ .open = fs3270_open, /* open */ .release = fs3270_close, /* release */ - .llseek = no_llseek, }; static void fs3270_create_cb(int minor) { __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); - device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), + device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), NULL, "3270/tub%d", minor); } static void fs3270_destroy_cb(int minor) { - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); + device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, minor)); __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); } @@ -546,7 +540,7 @@ static int __init fs3270_init(void) rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops); if (rc) return rc; - device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), + device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), NULL, "3270/tub"); raw3270_register_notifier(&fs3270_notifier); return 0; @@ -555,10 +549,11 @@ static int __init fs3270_init(void) static void __exit fs3270_exit(void) { raw3270_unregister_notifier(&fs3270_notifier); - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); + device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, 0)); __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } +MODULE_DESCRIPTION("IBM/3270 Driver - fullscreen driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR); diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c index 43df27ceec11..85fb689594ca 100644 --- a/drivers/s390/char/hmcdrv_cache.c +++ b/drivers/s390/char/hmcdrv_cache.c @@ -7,8 +7,7 @@ * */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/mm.h> diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c index 8d50c894711f..04b938c5357f 100644 --- a/drivers/s390/char/hmcdrv_dev.c +++ b/drivers/s390/char/hmcdrv_dev.c @@ -14,8 +14,7 @@ * end read() the response. */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/module.h> @@ -186,9 +185,6 @@ static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence) if (pos < 0) return -EINVAL; - if (fp->f_pos != pos) - ++fp->f_version; - fp->f_pos = pos; return pos; } @@ -247,24 +243,17 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf, size_t len, loff_t *pos) { ssize_t retlen; + void *pdata; pr_debug("writing file '/dev/%pD' at pos. 
%lld with length %zd\n", fp, (long long) *pos, len); if (!fp->private_data) { /* first expect a cmd write */ - fp->private_data = kmalloc(len + 1, GFP_KERNEL); - - if (!fp->private_data) - return -ENOMEM; - - if (!copy_from_user(fp->private_data, ubuf, len)) { - ((char *)fp->private_data)[len] = '\0'; - return len; - } - - kfree(fp->private_data); - fp->private_data = NULL; - return -EFAULT; + pdata = memdup_user_nul(ubuf, len); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + fp->private_data = pdata; + return len; } retlen = hmcdrv_dev_transfer((char *) fp->private_data, diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c index 02b6f394aec2..3312b2ac00a9 100644 --- a/drivers/s390/char/hmcdrv_ftp.c +++ b/drivers/s390/char/hmcdrv_ftp.c @@ -6,8 +6,7 @@ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com) */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/slab.h> @@ -17,6 +16,8 @@ #include <linux/ctype.h> #include <linux/crc16.h> +#include <asm/machine.h> + #include "hmcdrv_ftp.h" #include "hmcdrv_cache.h" #include "sclp_ftp.h" @@ -308,9 +309,9 @@ int hmcdrv_ftp_startup(void) mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */ if (hmcdrv_ftp_refcnt == 0) { - if (MACHINE_IS_VM) + if (machine_is_vm()) hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm; - else if (MACHINE_IS_LPAR || MACHINE_IS_KVM) + else if (machine_is_lpar() || machine_is_kvm()) hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar; else rc = -EOPNOTSUPP; diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c index 1447d0887225..b1cc5ba9fed8 100644 --- a/drivers/s390/char/hmcdrv_mod.c +++ b/drivers/s390/char/hmcdrv_mod.c @@ -6,8 +6,7 @@ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com) */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 567aedc03c76..a45f07a2cc8f 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -7,6 +7,7 @@ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ +#include <linux/export.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/slab.h> diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 9fa92e45e0ee..3d84f84b4cbd 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -7,8 +7,7 @@ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ -#define KMSG_COMPONENT "monreader" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "monreader: " fmt #include <linux/module.h> #include <linux/moduleparam.h> @@ -24,6 +23,7 @@ #include <linux/slab.h> #include <net/iucv/iucv.h> #include <linux/uaccess.h> +#include <asm/machine.h> #include <asm/ebcdic.h> #include <asm/extmem.h> @@ -111,7 +111,7 @@ static inline unsigned long mon_mca_end(struct mon_msg *monmsg) static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index) { - return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); + return *((u8 *)__va(mon_mca_start(monmsg)) + monmsg->mca_offset + index); } static inline u32 mon_mca_size(struct mon_msg *monmsg) @@ -121,12 +121,12 @@ static inline u32 mon_mca_size(struct mon_msg *monmsg) static inline u32 mon_rec_start(struct mon_msg *monmsg) { - return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); + return *((u32 *)(__va(mon_mca_start(monmsg)) + 
monmsg->mca_offset + 4)); } static inline u32 mon_rec_end(struct mon_msg *monmsg) { - return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); + return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 8)); } static int mon_check_mca(struct mon_msg *monmsg) @@ -392,8 +392,7 @@ static ssize_t mon_read(struct file *filp, char __user *data, mce_start = mon_mca_start(monmsg) + monmsg->mca_offset; if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) { count = min(count, (size_t) mce_start + 12 - monmsg->pos); - ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos, - count); + ret = copy_to_user(data, __va(monmsg->pos), count); if (ret) return -EFAULT; monmsg->pos += count; @@ -406,8 +405,7 @@ static ssize_t mon_read(struct file *filp, char __user *data, if (monmsg->pos <= mon_rec_end(monmsg)) { count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos + 1); - ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos, - count); + ret = copy_to_user(data, __va(monmsg->pos), count); if (ret) return -EFAULT; monmsg->pos += count; @@ -458,7 +456,7 @@ static int __init mon_init(void) { int rc; - if (!MACHINE_IS_VM) { + if (!machine_is_vm()) { pr_err("The z/VM *MONITOR record device driver cannot be " "loaded without z/VM\n"); return -ENODEV; diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index bc5193d81f9c..cf2e51061422 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -7,8 +7,7 @@ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> */ -#define KMSG_COMPONENT "monwriter" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "monwriter: " fmt #include <linux/module.h> #include <linux/moduleparam.h> @@ -23,6 +22,7 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <asm/machine.h> #include <asm/ebcdic.h> #include <asm/appldata.h> #include <asm/monwriter.h> @@ -293,7 +293,7 @@ static struct miscdevice mon_dev = { static int __init mon_init(void) { - if (!MACHINE_IS_VM) + if (!machine_is_vm()) return -ENODEV; /* * misc_register() has to be the last action in module_init(), because diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 7115c0f85650..55850b5a7f51 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -8,6 +8,7 @@ * Copyright IBM Corp. 2003, 2009 */ +#include <linux/export.h> #include <linux/module.h> #include <linux/err.h> #include <linux/init.h> @@ -17,6 +18,7 @@ #include <linux/types.h> #include <linux/wait.h> +#include <asm/machine.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/ebcdic.h> @@ -29,7 +31,9 @@ #include <linux/device.h> #include <linux/mutex.h> -struct class *class3270; +const struct class class3270 = { + .name = "3270", +}; EXPORT_SYMBOL(class3270); /* The main 3270 data structure. */ @@ -160,7 +164,8 @@ struct raw3270_request *raw3270_request_alloc(size_t size) /* * Setup ccw. 
*/ - rq->ccw.cda = __pa(rq->buffer); + if (rq->buffer) + rq->ccw.cda = virt_to_dma32(rq->buffer); rq->ccw.flags = CCW_FLAG_SLI; return rq; @@ -186,7 +191,8 @@ int raw3270_request_reset(struct raw3270_request *rq) return -EBUSY; rq->ccw.cmd_code = 0; rq->ccw.count = 0; - rq->ccw.cda = __pa(rq->buffer); + if (rq->buffer) + rq->ccw.cda = virt_to_dma32(rq->buffer); rq->ccw.flags = CCW_FLAG_SLI; rq->rescnt = 0; rq->rc = 0; @@ -221,7 +227,7 @@ EXPORT_SYMBOL(raw3270_request_add_data); */ void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size) { - rq->ccw.cda = __pa(data); + rq->ccw.cda = virt_to_dma32(data); rq->ccw.count = size; } EXPORT_SYMBOL(raw3270_request_set_data); @@ -231,7 +237,7 @@ EXPORT_SYMBOL(raw3270_request_set_data); */ void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib) { - rq->ccw.cda = __pa(ib->data); + rq->ccw.cda = virt_to_dma32(ib->data); rq->ccw.count = ib->size; rq->ccw.flags |= CCW_FLAG_IDA; } @@ -577,7 +583,7 @@ static void raw3270_read_modified(struct raw3270 *rp) rp->init_readmod.ccw.cmd_code = TC_READMOD; rp->init_readmod.ccw.flags = CCW_FLAG_SLI; rp->init_readmod.ccw.count = sizeof(rp->init_data); - rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data); + rp->init_readmod.ccw.cda = virt_to_dma32(rp->init_data); rp->init_readmod.callback = raw3270_read_modified_cb; rp->init_readmod.callback_data = rp->init_data; rp->state = RAW3270_STATE_READMOD; @@ -597,7 +603,7 @@ static void raw3270_writesf_readpart(struct raw3270 *rp) rp->init_readpart.ccw.cmd_code = TC_WRITESF; rp->init_readpart.ccw.flags = CCW_FLAG_SLI; rp->init_readpart.ccw.count = sizeof(wbuf); - rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data); + rp->init_readpart.ccw.cda = virt_to_dma32(&rp->init_data); rp->state = RAW3270_STATE_W4ATTN; raw3270_start_irq(&rp->init_view, &rp->init_readpart); } @@ -614,7 +620,7 @@ static void raw3270_reset_device_cb(struct raw3270_request *rq, void *data) if (rq->rc) { /* Reset command failed. */ rp->state = RAW3270_STATE_INIT; - } else if (MACHINE_IS_VM) { + } else if (machine_is_vm()) { raw3270_size_device_vm(rp); raw3270_size_device_done(rp); } else { @@ -635,7 +641,7 @@ static int __raw3270_reset_device(struct raw3270 *rp) rp->init_reset.ccw.cmd_code = TC_EWRITEA; rp->init_reset.ccw.flags = CCW_FLAG_SLI; rp->init_reset.ccw.count = 1; - rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data); + rp->init_reset.ccw.cda = virt_to_dma32(rp->init_data); rp->init_reset.callback = raw3270_reset_device_cb; rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset); if (rc == 0 && rp->state == RAW3270_STATE_INIT) @@ -1316,25 +1322,28 @@ static int raw3270_init(void) return 0; raw3270_registered = 1; rc = ccw_driver_register(&raw3270_ccw_driver); - if (rc == 0) { - /* Create attributes for early (= console) device. */ - mutex_lock(&raw3270_mutex); - class3270 = class_create("3270"); - list_for_each_entry(rp, &raw3270_devices, list) { - get_device(&rp->cdev->dev); - raw3270_create_attributes(rp); - } - mutex_unlock(&raw3270_mutex); + if (rc) + return rc; + rc = class_register(&class3270); + if (rc) + return rc; + /* Create attributes for early (= console) device. 
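class3270 turns from a pointer filled in by class_create() into a compile-time const struct class registered with class_register(), in line with the driver core's constified class API; device_create() and device_destroy() then take the class by address, as the fs3270 hunks earlier already showed. The shape of the API, sketched with a hypothetical class and device numbers:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>

static const struct class sample_class = {
	.name = "sample",
};

static int sample_register(void)
{
	struct device *dev;
	int rc;

	rc = class_register(&sample_class);
	if (rc)
		return rc;
	dev = device_create(&sample_class, NULL, MKDEV(10, 200), NULL, "sample0");
	if (IS_ERR(dev)) {
		class_unregister(&sample_class);
		return PTR_ERR(dev);
	}
	return 0;
}

static void sample_unregister(void)
{
	device_destroy(&sample_class, MKDEV(10, 200));
	class_unregister(&sample_class);
}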
*/ + mutex_lock(&raw3270_mutex); + list_for_each_entry(rp, &raw3270_devices, list) { + get_device(&rp->cdev->dev); + raw3270_create_attributes(rp); } - return rc; + mutex_unlock(&raw3270_mutex); + return 0; } static void raw3270_exit(void) { ccw_driver_unregister(&raw3270_ccw_driver); - class_destroy(class3270); + class_unregister(&class3270); } +MODULE_DESCRIPTION("IBM/3270 Driver - core functions"); MODULE_LICENSE("GPL"); module_init(raw3270_init); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index b1beecc7a0a9..5040c7e0e051 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -14,7 +14,7 @@ struct raw3270; struct raw3270_view; -extern struct class *class3270; +extern const struct class class3270; /* 3270 CCW request */ struct raw3270_request { diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 8f74db689a0c..98e334724a62 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -9,6 +9,7 @@ */ #include <linux/kernel_stat.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/err.h> #include <linux/panic_notifier.h> @@ -76,12 +77,19 @@ unsigned long sclp_console_full; /* The currently active SCLP command word. */ static sclp_cmdw_t active_cmd; +static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) +{ + if (sccb_int) + return __va(sccb_int); + return NULL; +} + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) { struct sclp_trace_entry e; memset(&e, 0, sizeof(e)); - strncpy(e.id, id, sizeof(e.id)); + strtomem(e.id, id); e.a = a; e.b = b; debug_event(&sclp_debug, prio, &e, sizeof(e)); @@ -245,7 +253,6 @@ static void sclp_request_timeout(bool force_restart); static void sclp_process_queue(void); static void __sclp_make_read_req(void); static int sclp_init_mask(int calculate); -static int sclp_init(void); static void __sclp_queue_read_req(void) @@ -262,7 +269,7 @@ __sclp_queue_read_req(void) static inline void __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *)) { - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); sclp_request_timer.function = cb; sclp_request_timer.expires = jiffies + time; add_timer(&sclp_request_timer); @@ -408,7 +415,7 @@ __sclp_start_request(struct sclp_req *req) if (sclp_running_state != sclp_running_state_idle) return 0; - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); rc = sclp_service_call_trace(req->command, req->sccb); req->start_count++; @@ -443,7 +450,7 @@ sclp_process_queue(void) spin_unlock_irqrestore(&sclp_lock, flags); return; } - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); while (!list_empty(&sclp_req_queue)) { req = list_entry(sclp_req_queue.next, struct sclp_req, list); rc = __sclp_start_request(req); @@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb) static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) { - struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); struct evbuf_header *evbuf; u16 response; @@ -659,11 +666,11 @@ static void sclp_interrupt_handler(struct ext_code ext_code, /* INT: Interrupt received (a=intparm, b=cmd) */ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, - (struct sccb_header *)__va(finished_sccb), + sclpint_to_sccb(finished_sccb), !ok_response(finished_sccb, active_cmd)); if (finished_sccb) { - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); sclp_running_state = sclp_running_state_reset_pending; req = 
__sclp_find_req(finished_sccb); if (req) { @@ -706,8 +713,8 @@ void sclp_sync_wait(void) { unsigned long long old_tick; + struct ctlreg cr0, cr0_sync; unsigned long flags; - unsigned long cr0, cr0_sync; static u64 sync_count; u64 timeout; int irq_context; @@ -720,7 +727,7 @@ sclp_sync_wait(void) timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_tod_clock_fast() + + timeout = get_tod_clock_monotonic() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -732,20 +739,20 @@ sclp_sync_wait(void) /* Enable service-signal interruption, disable timer interrupts */ old_tick = local_tick_disable(); trace_hardirqs_on(); - __ctl_store(cr0, 0, 0); - cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK; - cr0_sync |= 1UL << (63 - 54); - __ctl_load(cr0_sync, 0, 0); - __arch_local_irq_stosm(0x01); + local_ctl_store(0, &cr0); + cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK; + cr0_sync.val |= 1UL << (63 - 54); + local_ctl_load(0, &cr0_sync); + arch_local_irq_enable_external(); /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ - if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer)) + if (get_tod_clock_monotonic() > timeout && timer_delete(&sclp_request_timer)) sclp_request_timer.function(&sclp_request_timer); cpu_relax(); } local_irq_disable(); - __ctl_load(cr0, 0, 0); + local_ctl_load(0, &cr0); if (!irq_context) _local_bh_enable(); local_tick_enable(old_tick); @@ -1166,7 +1173,7 @@ sclp_check_interface(void) * with IRQs enabled. */ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); spin_lock_irqsave(&sclp_lock, flags); - del_timer(&sclp_request_timer); + timer_delete(&sclp_request_timer); rc = -EBUSY; if (sclp_init_req.status == SCLP_REQ_DONE) { if (sccb->header.response_code == 0x20) { @@ -1195,7 +1202,8 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) } static struct notifier_block sclp_reboot_notifier = { - .notifier_call = sclp_reboot_event + .notifier_call = sclp_reboot_event, + .priority = INT_MIN, }; static ssize_t con_pages_show(struct device_driver *dev, char *buf) @@ -1250,8 +1258,7 @@ static struct platform_driver sclp_pdrv = { /* Initialize SCLP driver. Return zero if driver is operational, non-zero * otherwise. 
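The new .priority = INT_MIN on sclp_reboot_notifier leans on the notifier-chain ordering rule: callbacks run in descending priority order, so INT_MIN guarantees this one executes after every other reboot notifier. The pattern, with hypothetical names:

#include <linux/limits.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int sample_reboot_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	/* quiesce the facility only after everyone else has run */
	return NOTIFY_DONE;
}

static struct notifier_block sample_reboot_nb = {
	.notifier_call	= sample_reboot_event,
	.priority	= INT_MIN,	/* highest runs first, INT_MIN runs last */
};

/* registered once during init: register_reboot_notifier(&sample_reboot_nb); */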
*/ -static int -sclp_init(void) +int sclp_init(void) { unsigned long flags; int rc = 0; @@ -1293,6 +1300,7 @@ sclp_init(void) fail_unregister_reboot_notifier: unregister_reboot_notifier(&sclp_reboot_notifier); fail_init_state_uninitialized: + list_del(&sclp_state_change_event.list); sclp_init_state = sclp_init_state_uninitialized; free_page((unsigned long) sclp_read_sccb); free_page((unsigned long) sclp_init_sccb); @@ -1303,13 +1311,7 @@ fail_unlock: static __init int sclp_initcall(void) { - int rc; - - rc = platform_driver_register(&sclp_pdrv); - if (rc) - return rc; - - return sclp_init(); + return platform_driver_register(&sclp_pdrv); } arch_initcall(sclp_initcall); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 6a23ec286c70..b31a680e0871 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -12,8 +12,10 @@ #include <linux/types.h> #include <linux/list.h> #include <asm/asm-extable.h> +#include <asm/machine.h> #include <asm/sclp.h> #include <asm/ebcdic.h> +#include <asm/asm.h> /* maximum number of pages concerning our own memory management */ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) @@ -84,13 +86,6 @@ typedef unsigned int sclp_cmdw_t; typedef u64 sccb_mask_t; -struct sccb_header { - u16 length; - u8 function_code; - u8 control_mask[3]; - u16 response_code; -} __attribute__((packed)); - struct init_sccb { struct sccb_header header; u16 _reserved; @@ -195,7 +190,9 @@ struct read_info_sccb { u8 byte_134; /* 134 */ u8 cpudirq; /* 135 */ u16 cbl; /* 136-137 */ - u8 _pad_138[EXT_SCCB_READ_SCP - 138]; + u8 byte_138; /* 138 */ + u8 byte_139; /* 139 */ + u8 _pad_140[EXT_SCCB_READ_SCP - 140]; } __packed __aligned(PAGE_SIZE); struct read_storage_sccb { @@ -237,13 +234,6 @@ struct gds_vector { u16 gds_id; } __attribute__((packed)); -struct evbuf_header { - u16 length; - u8 type; - u8 flags; - u16 _reserved; -} __attribute__((packed)); - struct sclp_req { struct list_head list; /* list_head for request queueing. */ sclp_cmdw_t command; /* sclp command to execute */ @@ -325,19 +315,22 @@ struct read_info_sccb * __init sclp_early_get_info(void); /* Perform service call. Return 0 on success, non-zero otherwise. */ static inline int sclp_service_call(sclp_cmdw_t command, void *sccb) { - int cc = 4; /* Initialize for program check handling */ + int cc, exception; - asm volatile( - "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ - "1: ipm %0\n" - " srl %0,28\n" + exception = 1; + asm_inline volatile( + "0: .insn rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */ + "1: lhi %[exc],0\n" "2:\n" + CC_IPM(cc) EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "+&d" (cc) : "d" (command), "a" (__pa(sccb)) - : "cc", "memory"); - if (cc == 4) + : CC_OUT(cc, cc), [exc] "+d" (exception) + : [cmd] "d" (command), [sccb] "a" (__pa(sccb)) + : CC_CLOBBER_LIST("memory")); + if (exception) return -EINVAL; + cc = CC_TRANSFORM(cc); if (cc == 3) return -EIO; if (cc == 2) @@ -350,21 +343,21 @@ static inline int sclp_service_call(sclp_cmdw_t command, void *sccb) static inline unsigned char sclp_ascebc(unsigned char ch) { - return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch]; + return (machine_is_vm()) ? _ascebc[ch] : _ascebc_500[ch]; } /* translate string from EBCDIC to ASCII */ static inline void sclp_ebcasc_str(char *str, int nr) { - (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr); + (machine_is_vm()) ? EBCASC(str, nr) : EBCASC_500(str, nr); } /* translate string from ASCII to EBCDIC */ static inline void sclp_ascebc_str(char *str, int nr) { - (MACHINE_IS_VM) ? 
ASCEBC(str, nr) : ASCEBC_500(str, nr); + (machine_is_vm()) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); } static inline struct gds_vector * diff --git a/drivers/s390/char/sclp_ap.c b/drivers/s390/char/sclp_ap.c index 0dd1ca712795..18bb018b4e0c 100644 --- a/drivers/s390/char/sclp_ap.c +++ b/drivers/s390/char/sclp_ap.c @@ -4,8 +4,7 @@ * * Copyright IBM Corp. 2020 */ -#define KMSG_COMPONENT "sclp_cmd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_cmd: " fmt #include <linux/export.h> #include <linux/slab.h> diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 8b4575a0db9f..be4730936f5c 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -5,30 +5,49 @@ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ -#define KMSG_COMPONENT "sclp_cmd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_cmd: " fmt #include <linux/completion.h> -#include <linux/init.h> -#include <linux/errno.h> #include <linux/err.h> -#include <linux/export.h> +#include <linux/errno.h> +#include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/mm.h> -#include <linux/mmzone.h> -#include <linux/memory.h> -#include <linux/module.h> -#include <asm/ctl_reg.h> #include <asm/chpid.h> -#include <asm/setup.h> -#include <asm/page.h> +#include <asm/ctlreg.h> #include <asm/sclp.h> -#include <asm/numa.h> -#include <asm/facility.h> #include "sclp.h" +/* CPU configuration related functions */ +#define SCLP_CMDW_CONFIGURE_CPU 0x00110001 +#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 +/* Channel path configuration related functions */ +#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001 +#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001 +#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001 + +struct cpu_configure_sccb { + struct sccb_header header; +} __packed __aligned(8); + +struct chp_cfg_sccb { + struct sccb_header header; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __packed; + +struct chp_info_sccb { + struct sccb_header header; + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __packed; + static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -59,13 +78,11 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout) request->callback_data = &completion; init_completion(&completion); - /* Perform sclp request. */ rc = sclp_add_request(request); if (rc) goto out; wait_for_completion(&completion); - /* Check response. */ if (request->status != SCLP_REQ_DONE) { pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n", cmd, request->status); @@ -76,22 +93,15 @@ out: return rc; } -/* - * CPU configuration related functions. - */ - -#define SCLP_CMDW_CONFIGURE_CPU 0x00110001 -#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 - int _sclp_get_core_info(struct sclp_core_info *info) { - int rc; - int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE; struct read_cpu_info_sccb *sccb; + int rc, length; if (!SCLP_HAS_CPU_INFO) return -EOPNOTSUPP; + length = test_facility(140) ? 
EXT_SCCB_READ_CPU : PAGE_SIZE; sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length)); if (!sccb) return -ENOMEM; @@ -109,14 +119,10 @@ int _sclp_get_core_info(struct sclp_core_info *info) } sclp_fill_core_info(info, sccb); out: - free_pages((unsigned long) sccb, get_order(length)); + free_pages((unsigned long)sccb, get_order(length)); return rc; } -struct cpu_configure_sccb { - struct sccb_header header; -} __attribute__((packed, aligned(8))); - static int do_core_configure(sclp_cmdw_t cmd) { struct cpu_configure_sccb *sccb; @@ -125,8 +131,8 @@ static int do_core_configure(sclp_cmdw_t cmd) if (!SCLP_HAS_CPU_RECONFIG) return -EOPNOTSUPP; /* - * This is not going to cross a page boundary since we force - * kmalloc to have a minimum alignment of 8 bytes on s390. + * Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb + * is not going to cross a page boundary. */ sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA); if (!sccb) @@ -160,371 +166,6 @@ int sclp_core_deconfigure(u8 core) return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8); } -#ifdef CONFIG_MEMORY_HOTPLUG - -static DEFINE_MUTEX(sclp_mem_mutex); -static LIST_HEAD(sclp_mem_list); -static u8 sclp_max_storage_id; -static DECLARE_BITMAP(sclp_storage_ids, 256); - -struct memory_increment { - struct list_head list; - u16 rn; - int standby; -}; - -struct assign_storage_sccb { - struct sccb_header header; - u16 rn; -} __packed; - -int arch_get_memory_phys_device(unsigned long start_pfn) -{ - if (!sclp.rzm) - return 0; - return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm); -} - -static unsigned long long rn2addr(u16 rn) -{ - return (unsigned long long) (rn - 1) * sclp.rzm; -} - -static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) -{ - struct assign_storage_sccb *sccb; - int rc; - - sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sccb) - return -ENOMEM; - sccb->header.length = PAGE_SIZE; - sccb->rn = rn; - rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); - if (rc) - goto out; - switch (sccb->header.response_code) { - case 0x0020: - case 0x0120: - break; - default: - pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n", - cmd, sccb->header.response_code, rn); - rc = -EIO; - break; - } -out: - free_page((unsigned long) sccb); - return rc; -} - -static int sclp_assign_storage(u16 rn) -{ - unsigned long long start; - int rc; - - rc = do_assign_storage(0x000d0001, rn); - if (rc) - return rc; - start = rn2addr(rn); - storage_key_init_range(start, start + sclp.rzm); - return 0; -} - -static int sclp_unassign_storage(u16 rn) -{ - return do_assign_storage(0x000c0001, rn); -} - -struct attach_storage_sccb { - struct sccb_header header; - u16 :16; - u16 assigned; - u32 :32; - u32 entries[]; -} __packed; - -static int sclp_attach_storage(u8 id) -{ - struct attach_storage_sccb *sccb; - int rc; - int i; - - sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sccb) - return -ENOMEM; - sccb->header.length = PAGE_SIZE; - sccb->header.function_code = 0x40; - rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb, - SCLP_QUEUE_INTERVAL); - if (rc) - goto out; - switch (sccb->header.response_code) { - case 0x0020: - set_bit(id, sclp_storage_ids); - for (i = 0; i < sccb->assigned; i++) { - if (sccb->entries[i]) - sclp_unassign_storage(sccb->entries[i] >> 16); - } - break; - default: - rc = -EIO; - break; - } -out: - free_page((unsigned long) sccb); - return rc; -} - -static int sclp_mem_change_state(unsigned long start, unsigned long size, - int 
online) -{ - struct memory_increment *incr; - unsigned long long istart; - int rc = 0; - - list_for_each_entry(incr, &sclp_mem_list, list) { - istart = rn2addr(incr->rn); - if (start + size - 1 < istart) - break; - if (start > istart + sclp.rzm - 1) - continue; - if (online) - rc |= sclp_assign_storage(incr->rn); - else - sclp_unassign_storage(incr->rn); - if (rc == 0) - incr->standby = online ? 0 : 1; - } - return rc ? -EIO : 0; -} - -static bool contains_standby_increment(unsigned long start, unsigned long end) -{ - struct memory_increment *incr; - unsigned long istart; - - list_for_each_entry(incr, &sclp_mem_list, list) { - istart = rn2addr(incr->rn); - if (end - 1 < istart) - continue; - if (start > istart + sclp.rzm - 1) - continue; - if (incr->standby) - return true; - } - return false; -} - -static int sclp_mem_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - unsigned long start, size; - struct memory_notify *arg; - unsigned char id; - int rc = 0; - - arg = data; - start = arg->start_pfn << PAGE_SHIFT; - size = arg->nr_pages << PAGE_SHIFT; - mutex_lock(&sclp_mem_mutex); - for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) - sclp_attach_storage(id); - switch (action) { - case MEM_GOING_OFFLINE: - /* - * We do not allow to set memory blocks offline that contain - * standby memory. This is done to simplify the "memory online" - * case. - */ - if (contains_standby_increment(start, start + size)) - rc = -EPERM; - break; - case MEM_ONLINE: - case MEM_CANCEL_OFFLINE: - break; - case MEM_GOING_ONLINE: - rc = sclp_mem_change_state(start, size, 1); - break; - case MEM_CANCEL_ONLINE: - sclp_mem_change_state(start, size, 0); - break; - case MEM_OFFLINE: - sclp_mem_change_state(start, size, 0); - break; - default: - rc = -EINVAL; - break; - } - mutex_unlock(&sclp_mem_mutex); - return rc ? 
NOTIFY_BAD : NOTIFY_OK; -} - -static struct notifier_block sclp_mem_nb = { - .notifier_call = sclp_mem_notifier, -}; - -static void __init align_to_block_size(unsigned long long *start, - unsigned long long *size, - unsigned long long alignment) -{ - unsigned long long start_align, size_align; - - start_align = roundup(*start, alignment); - size_align = rounddown(*start + *size, alignment) - start_align; - - pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n", - *start, size_align >> 20, *size >> 20); - *start = start_align; - *size = size_align; -} - -static void __init add_memory_merged(u16 rn) -{ - unsigned long long start, size, addr, block_size; - static u16 first_rn, num; - - if (rn && first_rn && (first_rn + num == rn)) { - num++; - return; - } - if (!first_rn) - goto skip_add; - start = rn2addr(first_rn); - size = (unsigned long long) num * sclp.rzm; - if (start >= ident_map_size) - goto skip_add; - if (start + size > ident_map_size) - size = ident_map_size - start; - block_size = memory_block_size_bytes(); - align_to_block_size(&start, &size, block_size); - if (!size) - goto skip_add; - for (addr = start; addr < start + size; addr += block_size) - add_memory(0, addr, block_size, MHP_NONE); -skip_add: - first_rn = rn; - num = 1; -} - -static void __init sclp_add_standby_memory(void) -{ - struct memory_increment *incr; - - list_for_each_entry(incr, &sclp_mem_list, list) - if (incr->standby) - add_memory_merged(incr->rn); - add_memory_merged(0); -} - -static void __init insert_increment(u16 rn, int standby, int assigned) -{ - struct memory_increment *incr, *new_incr; - struct list_head *prev; - u16 last_rn; - - new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); - if (!new_incr) - return; - new_incr->rn = rn; - new_incr->standby = standby; - last_rn = 0; - prev = &sclp_mem_list; - list_for_each_entry(incr, &sclp_mem_list, list) { - if (assigned && incr->rn > rn) - break; - if (!assigned && incr->rn - last_rn > 1) - break; - last_rn = incr->rn; - prev = &incr->list; - } - if (!assigned) - new_incr->rn = last_rn + 1; - if (new_incr->rn > sclp.rnmax) { - kfree(new_incr); - return; - } - list_add(&new_incr->list, prev); -} - -static int __init sclp_detect_standby_memory(void) -{ - struct read_storage_sccb *sccb; - int i, id, assigned, rc; - - if (oldmem_data.start) /* No standby memory in kdump mode */ - return 0; - if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL) - return 0; - rc = -ENOMEM; - sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); - if (!sccb) - goto out; - assigned = 0; - for (id = 0; id <= sclp_max_storage_id; id++) { - memset(sccb, 0, PAGE_SIZE); - sccb->header.length = PAGE_SIZE; - rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb); - if (rc) - goto out; - switch (sccb->header.response_code) { - case 0x0010: - set_bit(id, sclp_storage_ids); - for (i = 0; i < sccb->assigned; i++) { - if (!sccb->entries[i]) - continue; - assigned++; - insert_increment(sccb->entries[i] >> 16, 0, 1); - } - break; - case 0x0310: - break; - case 0x0410: - for (i = 0; i < sccb->assigned; i++) { - if (!sccb->entries[i]) - continue; - assigned++; - insert_increment(sccb->entries[i] >> 16, 1, 1); - } - break; - default: - rc = -EIO; - break; - } - if (!rc) - sclp_max_storage_id = sccb->max_id; - } - if (rc || list_empty(&sclp_mem_list)) - goto out; - for (i = 1; i <= sclp.rnmax - assigned; i++) - insert_increment(0, 1, 0); - rc = register_memory_notifier(&sclp_mem_nb); - if (rc) - goto out; - sclp_add_standby_memory(); -out: - free_page((unsigned long) 
sccb); - return rc; -} -__initcall(sclp_detect_standby_memory); - -#endif /* CONFIG_MEMORY_HOTPLUG */ - -/* - * Channel path configuration related functions. - */ - -#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001 -#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001 -#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001 - -struct chp_cfg_sccb { - struct sccb_header header; - u8 ccm; - u8 reserved[6]; - u8 cssid; -} __attribute__((packed)); - static int do_chp_configure(sclp_cmdw_t cmd) { struct chp_cfg_sccb *sccb; @@ -532,8 +173,7 @@ static int do_chp_configure(sclp_cmdw_t cmd) if (!SCLP_HAS_CHP_RECONFIG) return -EOPNOTSUPP; - /* Prepare sccb. */ - sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); @@ -553,7 +193,7 @@ static int do_chp_configure(sclp_cmdw_t cmd) break; } out: - free_page((unsigned long) sccb); + free_page((unsigned long)sccb); return rc; } @@ -581,16 +221,6 @@ int sclp_chp_deconfigure(struct chp_id chpid) return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); } -struct chp_info_sccb { - struct sccb_header header; - u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; - u8 standby[SCLP_CHP_INFO_MASK_SIZE]; - u8 configured[SCLP_CHP_INFO_MASK_SIZE]; - u8 ccm; - u8 reserved[6]; - u8 cssid; -} __attribute__((packed)); - /** * sclp_chp_read_info - perform read channel-path information sclp command * @info: resulting channel-path information data @@ -606,8 +236,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info) if (!SCLP_HAS_CHP_INFO) return -EOPNOTSUPP; - /* Prepare sccb. */ - sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); @@ -624,6 +253,6 @@ int sclp_chp_read_info(struct sclp_chp_info *info) memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE); memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE); out: - free_page((unsigned long) sccb); + free_page((unsigned long)sccb); return rc; } diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index e5d947c763ea..d8544c485808 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -109,7 +109,7 @@ static void sclp_console_sync_queue(void) unsigned long flags; spin_lock_irqsave(&sclp_con_lock, flags); - del_timer(&sclp_con_timer); + timer_delete(&sclp_con_timer); while (sclp_con_queue_running) { spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); @@ -264,6 +264,19 @@ static struct console sclp_console = }; /* + * Release allocated pages. + */ +static void __init __sclp_console_free_pages(void) +{ + struct list_head *page, *p; + + list_for_each_safe(page, p, &sclp_con_pages) { + list_del(page); + free_page((unsigned long)page); + } +} + +/* * called by console_init() in drivers/char/tty_io.c at boot-time. 
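The __sclp_console_free_pages() helper just added relies on two conventions also used by the allocation loop that follows: a free page doubles as its own list_head, and iteration uses list_for_each_safe() because entries are deleted mid-walk. It lets console init unwind cleanly instead of continuing after a failed allocation. A sketch of the combined pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>

static LIST_HEAD(page_pool);		/* hypothetical buffer pool */

static void free_pool(void)
{
	struct list_head *page, *p;

	/* _safe variant: the current entry is freed inside the loop body */
	list_for_each_safe(page, p, &page_pool) {
		list_del(page);
		free_page((unsigned long)page);
	}
}

static int fill_pool(unsigned int count)
{
	void *page;

	while (count--) {
		page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!page) {
			free_pool();	/* unwind everything allocated so far */
			return -ENOMEM;
		}
		list_add_tail(page, &page_pool);	/* page stores the list_head */
	}
	return 0;
}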
*/ static int __init @@ -282,6 +295,10 @@ sclp_console_init(void) /* Allocate pages for output buffering */ for (i = 0; i < sclp_console_pages; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!page) { + __sclp_console_free_pages(); + return -ENOMEM; + } list_add_tail(page, &sclp_con_pages); } sclp_conbuf = NULL; diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 10383e936461..9cfbe3fc3dca 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -3,8 +3,7 @@ * Copyright IBM Corp. 2007 */ -#define KMSG_COMPONENT "sclp_config" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_config: " fmt #include <linux/init.h> #include <linux/errno.h> @@ -60,7 +59,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work) { lock_device_hotplug(); - smp_rescan_cpus(); + smp_rescan_cpus(false); unlock_device_hotplug(); } @@ -128,7 +127,7 @@ out: } static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { int rc; diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index f60d7ea8268d..8e1636bcf8b5 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c @@ -7,8 +7,7 @@ * Michael Ernst <mernst@de.ibm.com> */ -#define KMSG_COMPONENT "sclp_cpi" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_cpi: " fmt #include <linux/kernel.h> #include <linux/init.h> @@ -223,7 +222,7 @@ static ssize_t system_name_show(struct kobject *kobj, int rc; mutex_lock(&sclp_cpi_mutex); - rc = snprintf(page, PAGE_SIZE, "%s\n", system_name); + rc = sysfs_emit(page, "%s\n", system_name); mutex_unlock(&sclp_cpi_mutex); return rc; } @@ -255,7 +254,7 @@ static ssize_t sysplex_name_show(struct kobject *kobj, int rc; mutex_lock(&sclp_cpi_mutex); - rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); + rc = sysfs_emit(page, "%s\n", sysplex_name); mutex_unlock(&sclp_cpi_mutex); return rc; } @@ -287,7 +286,7 @@ static ssize_t system_type_show(struct kobject *kobj, int rc; mutex_lock(&sclp_cpi_mutex); - rc = snprintf(page, PAGE_SIZE, "%s\n", system_type); + rc = sysfs_emit(page, "%s\n", system_type); mutex_unlock(&sclp_cpi_mutex); return rc; } @@ -321,7 +320,7 @@ static ssize_t system_level_show(struct kobject *kobj, mutex_lock(&sclp_cpi_mutex); level = system_level; mutex_unlock(&sclp_cpi_mutex); - return snprintf(page, PAGE_SIZE, "%#018llx\n", level); + return sysfs_emit(page, "%#018llx\n", level); } static ssize_t system_level_store(struct kobject *kobj, diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c index 248b5db3eaa8..e23a97359286 100644 --- a/drivers/s390/char/sclp_ctl.c +++ b/drivers/s390/char/sclp_ctl.c @@ -7,7 +7,6 @@ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com> */ -#include <linux/compat.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/gfp.h> @@ -43,10 +42,7 @@ static int sclp_ctl_cmdw_supported(unsigned int cmdw) static void __user *u64_to_uptr(u64 value) { - if (is_compat_task()) - return compat_ptr(value); - else - return (void __user *)(unsigned long)value; + return (void __user *)(unsigned long)value; } /* @@ -95,10 +91,7 @@ static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd, { void __user *argp; - if (is_compat_task()) - argp = compat_ptr(arg); - else - argp = (void 
__user *) arg; + argp = (void __user *)arg; switch (cmd) { case SCLP_CTL_SCCB: return sclp_ctl_ioctl_sccb(argp); @@ -114,8 +107,6 @@ static const struct file_operations sclp_ctl_fops = { .owner = THIS_MODULE, .open = nonseekable_open, .unlocked_ioctl = sclp_ctl_ioctl, - .compat_ioctl = sclp_ctl_ioctl, - .llseek = no_llseek, }; /* diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index fdc8668f3fba..6bf501ad8ff0 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -5,12 +5,12 @@ * Copyright IBM Corp. 2013 */ -#define KMSG_COMPONENT "sclp_early" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_early: " fmt +#include <linux/export.h> #include <linux/errno.h> #include <linux/memblock.h> -#include <asm/ctl_reg.h> +#include <asm/ctlreg.h> #include <asm/sclp.h> #include <asm/ipl.h> #include <asm/setup.h> @@ -44,15 +44,14 @@ static void __init sclp_early_facilities_detect(void) sclp.has_ibs = !!(sccb->fac117 & 0x20); sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); + sclp.has_wti = !!(sccb->fac119 & 0x40); sclp.has_kss = !!(sccb->fac98 & 0x01); sclp.has_aisii = !!(sccb->fac118 & 0x40); sclp.has_aeni = !!(sccb->fac118 & 0x20); sclp.has_aisi = !!(sccb->fac118 & 0x10); sclp.has_zpci_lsi = !!(sccb->fac118 & 0x01); - if (sccb->fac85 & 0x02) - S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; - if (sccb->fac91 & 0x40) - S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; + sclp.has_diag204_bif = !!(sccb->fac98 & 0x80); + sclp.has_diag310 = !!(sccb->fac91 & 0x80); if (sccb->cpuoff > 134) { sclp.has_diag318 = !!(sccb->byte_134 & 0x80); sclp.has_diag320 = !!(sccb->byte_134 & 0x04); @@ -62,6 +61,8 @@ static void __init sclp_early_facilities_detect(void) sclp.has_sipl = !!(sccb->cbl & 0x4000); sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000); } + if (sccb->cpuoff > 139) + sclp.has_diag324 = !!(sccb->byte_139 & 0x80); sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp.rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; sclp.rzm <<= 20; @@ -73,7 +74,7 @@ static void __init sclp_early_facilities_detect(void) sclp.hamax = U64_MAX; if (!sccb->hcpua) { - if (MACHINE_IS_VM) + if (machine_is_vm()) sclp.max_cores = 64; else sclp.max_cores = sccb->ncpurl; diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index dbd5c53d8edf..55e50d428aab 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -7,11 +7,13 @@ #include <linux/kernel.h> #include <asm/processor.h> #include <asm/lowcore.h> +#include <asm/ctlreg.h> #include <asm/ebcdic.h> #include <asm/irq.h> #include <asm/sections.h> #include <asm/physmem_info.h> #include <asm/facility.h> +#include <asm/machine.h> #include "sclp.h" #include "sclp_rw.h" @@ -31,17 +33,17 @@ void sclp_early_wait_irq(void) psw_t psw_ext_save, psw_wait; union ctlreg0 cr0, cr0_new; - __ctl_store(cr0.val, 0, 0); + local_ctl_store(0, &cr0.reg); cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK; cr0_new.lap = 0; cr0_new.sssm = 1; - __ctl_load(cr0_new.val, 0, 0); + local_ctl_load(0, &cr0_new.reg); - psw_ext_save = S390_lowcore.external_new_psw; + psw_ext_save = get_lowcore()->external_new_psw; psw_mask = __extract_psw(); - S390_lowcore.external_new_psw.mask = psw_mask; + get_lowcore()->external_new_psw.mask = psw_mask; psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; - S390_lowcore.ext_int_code = 0; + get_lowcore()->ext_int_code = 0; do { asm volatile( @@ -49,16 +51,16 @@ void sclp_early_wait_irq(void) " stg %[addr],%[psw_wait_addr]\n" " stg %[addr],%[psw_ext_addr]\n" " lpswe %[psw_wait]\n" - "0:\n" + "0:" : [addr] "=&d" (addr), [psw_wait_addr] "=Q" (psw_wait.addr), - [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr) + [psw_ext_addr] "=Q" (get_lowcore()->external_new_psw.addr) : [psw_wait] "Q" (psw_wait) : "cc", "memory"); - } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG); + } while (get_lowcore()->ext_int_code != EXT_IRQ_SERVICE_SIG); - S390_lowcore.external_new_psw = psw_ext_save; - __ctl_load(cr0.val, 0, 0); + get_lowcore()->external_new_psw = psw_ext_save; + local_ctl_load(0, &cr0.reg); } int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb) @@ -334,6 +336,18 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size) return 0; } +void __init sclp_early_detect_machine_features(void) +{ + struct read_info_sccb *sccb = &sclp_info_sccb; + + if (!sclp_info_sccb_valid) + return; + if (sccb->fac85 & 0x02) + set_machine_feature(MFEATURE_ESOP); + if (sccb->fac91 & 0x40) + set_machine_feature(MFEATURE_TLB_GUEST); +} + #define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL void __weak __init add_physmem_online_range(u64 start, u64 end) {} diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c index d27e2cbfbccb..2a1c4b2cafc8 100644 --- a/drivers/s390/char/sclp_ftp.c +++ b/drivers/s390/char/sclp_ftp.c @@ -7,8 +7,7 @@ * */ -#define KMSG_COMPONENT "hmcdrv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "hmcdrv: " fmt #include <linux/kernel.h> #include <linux/mm.h> diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c new file mode 100644 index 000000000000..676c085b4f8a --- /dev/null +++ b/drivers/s390/char/sclp_mem.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory hotplug support via sclp + * + * Copyright IBM Corp. 
2025 + */ + +#define pr_fmt(fmt) "sclp_mem: " fmt + +#include <linux/cpufeature.h> +#include <linux/container_of.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kobject.h> +#include <linux/kstrtox.h> +#include <linux/memory.h> +#include <linux/memory_hotplug.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/slab.h> +#include <asm/facility.h> +#include <asm/page.h> +#include <asm/page-states.h> +#include <asm/sclp.h> + +#include "sclp.h" + +#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001 +#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001 + +static LIST_HEAD(sclp_mem_list); +static u8 sclp_max_storage_id; +static DECLARE_BITMAP(sclp_storage_ids, 256); + +struct memory_increment { + struct list_head list; + u16 rn; + int standby; +}; + +struct sclp_mem { + struct kobject kobj; + unsigned int id; + unsigned int memmap_on_memory; + unsigned int config; +}; + +struct sclp_mem_arg { + struct sclp_mem *sclp_mems; + struct kset *kset; +}; + +struct assign_storage_sccb { + struct sccb_header header; + u16 rn; +} __packed; + +struct attach_storage_sccb { + struct sccb_header header; + u16 :16; + u16 assigned; + u32 :32; + u32 entries[]; +} __packed; + +int arch_get_memory_phys_device(unsigned long start_pfn) +{ + if (!sclp.rzm) + return 0; + return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm); +} + +static unsigned long rn2addr(u16 rn) +{ + return (unsigned long)(rn - 1) * sclp.rzm; +} + +static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) +{ + struct assign_storage_sccb *sccb; + int rc; + + sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + sccb->header.length = PAGE_SIZE; + sccb->rn = rn; + rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: + case 0x0120: + break; + default: + pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n", + cmd, sccb->header.response_code, rn); + rc = -EIO; + break; + } +out: + free_page((unsigned long)sccb); + return rc; +} + +static int sclp_assign_storage(u16 rn) +{ + unsigned long start; + int rc; + + rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn); + if (rc) + return rc; + start = rn2addr(rn); + storage_key_init_range(start, start + sclp.rzm); + return 0; +} + +static int sclp_unassign_storage(u16 rn) +{ + return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn); +} + +static int sclp_attach_storage(u8 id) +{ + struct attach_storage_sccb *sccb; + int rc, i; + + sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + sccb->header.length = PAGE_SIZE; + sccb->header.function_code = 0x40; + rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb, + SCLP_QUEUE_INTERVAL); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: + set_bit(id, sclp_storage_ids); + for (i = 0; i < sccb->assigned; i++) { + if (sccb->entries[i]) + sclp_unassign_storage(sccb->entries[i] >> 16); + } + break; + default: + rc = -EIO; + break; + } +out: + free_page((unsigned long)sccb); + return rc; +} + +static int sclp_mem_change_state(unsigned long start, unsigned long size, + int online) +{ + struct memory_increment *incr; + unsigned long istart; + int rc = 0; + + list_for_each_entry(incr, &sclp_mem_list, list) { + istart = rn2addr(incr->rn); + if (start + size - 1 < istart) + break; + if (start > istart + sclp.rzm - 1) + continue; + if (online) + rc |= sclp_assign_storage(incr->rn); + else + sclp_unassign_storage(incr->rn); + if (rc == 0) + 
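+ /* track the increment's standby state; rc accumulates assign errors */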
incr->standby = online ? 0 : 1; + } + return rc ? -EIO : 0; +} + +static ssize_t sclp_config_mem_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct sclp_mem *sclp_mem = container_of(kobj, struct sclp_mem, kobj); + + return sysfs_emit(buf, "%u\n", READ_ONCE(sclp_mem->config)); +} + +static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long addr, block_size; + struct sclp_mem *sclp_mem; + struct memory_block *mem; + unsigned char id; + bool value; + int rc; + + rc = kstrtobool(buf, &value); + if (rc) + return rc; + sclp_mem = container_of(kobj, struct sclp_mem, kobj); + block_size = memory_block_size_bytes(); + addr = sclp_mem->id * block_size; + /* + * Hold device_hotplug_lock when adding/removing memory blocks. + * Additionally, also protect calls to find_memory_block() and + * sclp_attach_storage(). + */ + rc = lock_device_hotplug_sysfs(); + if (rc) + goto out; + for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) + sclp_attach_storage(id); + if (value) { + if (sclp_mem->config) + goto out_unlock; + rc = sclp_mem_change_state(addr, block_size, 1); + if (rc) + goto out_unlock; + /* + * Set entire memory block CMMA state to nodat. Later, when + * page table pages are allocated via __add_memory(), those + * regions are marked via __arch_set_page_dat(). + */ + __arch_set_page_nodat((void *)__va(addr), block_size >> PAGE_SHIFT); + rc = __add_memory(0, addr, block_size, + sclp_mem->memmap_on_memory ? + MHP_MEMMAP_ON_MEMORY : MHP_NONE); + if (rc) { + sclp_mem_change_state(addr, block_size, 0); + goto out_unlock; + } + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(addr))); + put_device(&mem->dev); + WRITE_ONCE(sclp_mem->config, 1); + } else { + if (!sclp_mem->config) + goto out_unlock; + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(addr))); + if (mem->state != MEM_OFFLINE) { + put_device(&mem->dev); + rc = -EBUSY; + goto out_unlock; + } + /* drop the reference just taken via find_memory_block() */ + put_device(&mem->dev); + sclp_mem_change_state(addr, block_size, 0); + __remove_memory(addr, block_size); + WRITE_ONCE(sclp_mem->config, 0); + } +out_unlock: + unlock_device_hotplug(); +out: + return rc ? rc : count; +} + +static struct kobj_attribute sclp_config_mem_attr = + __ATTR(config, 0644, sclp_config_mem_show, sclp_config_mem_store); + +static ssize_t sclp_memmap_on_memory_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct sclp_mem *sclp_mem = container_of(kobj, struct sclp_mem, kobj); + + return sysfs_emit(buf, "%u\n", READ_ONCE(sclp_mem->memmap_on_memory)); +} + +static ssize_t sclp_memmap_on_memory_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct sclp_mem *sclp_mem; + unsigned long block_size; + struct memory_block *mem; + bool value; + int rc; + + rc = kstrtobool(buf, &value); + if (rc) + return rc; + if (value && !mhp_supports_memmap_on_memory()) + return -EOPNOTSUPP; + rc = lock_device_hotplug_sysfs(); + if (rc) + return rc; + block_size = memory_block_size_bytes(); + sclp_mem = container_of(kobj, struct sclp_mem, kobj); + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(sclp_mem->id * block_size))); + if (!mem) { + WRITE_ONCE(sclp_mem->memmap_on_memory, value); + } else { + put_device(&mem->dev); + rc = -EBUSY; + } + unlock_device_hotplug(); + return rc ?
rc : count; +} + +static const struct kobj_type ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct kobj_attribute sclp_memmap_attr = + __ATTR(memmap_on_memory, 0644, sclp_memmap_on_memory_show, sclp_memmap_on_memory_store); + +static struct attribute *sclp_mem_attrs[] = { + &sclp_config_mem_attr.attr, + &sclp_memmap_attr.attr, + NULL, +}; + +static struct attribute_group sclp_mem_attr_group = { + .attrs = sclp_mem_attrs, +}; + +static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset, + unsigned int id, bool config, bool memmap_on_memory) +{ + int rc; + + sclp_mem->memmap_on_memory = memmap_on_memory; + sclp_mem->config = config; + sclp_mem->id = id; + kobject_init(&sclp_mem->kobj, &ktype); + rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id); + if (rc) + return rc; + return sysfs_create_group(&sclp_mem->kobj, &sclp_mem_attr_group); +} + +static int sclp_create_configured_mem(struct memory_block *mem, void *argument) +{ + struct sclp_mem *sclp_mems; + struct sclp_mem_arg *arg; + struct kset *kset; + unsigned int id; + + id = mem->dev.id; + arg = (struct sclp_mem_arg *)argument; + sclp_mems = arg->sclp_mems; + kset = arg->kset; + return sclp_create_mem(&sclp_mems[id], kset, id, true, false); +} + +static void __init align_to_block_size(unsigned long *start, + unsigned long *size, + unsigned long alignment) +{ + unsigned long start_align, size_align; + + start_align = roundup(*start, alignment); + size_align = rounddown(*start + *size, alignment) - start_align; + + pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n", + *start, size_align >> 20, *size >> 20); + *start = start_align; + *size = size_align; +} + +static int __init sclp_create_standby_mems_merged(struct sclp_mem *sclp_mems, + struct kset *kset, u16 rn) +{ + unsigned long start, size, addr, block_size; + static u16 first_rn, num; + unsigned int id; + int rc = 0; + + if (rn && first_rn && (first_rn + num == rn)) { + num++; + return rc; + } + if (!first_rn) + goto skip_add; + start = rn2addr(first_rn); + size = (unsigned long)num * sclp.rzm; + if (start >= ident_map_size) + goto skip_add; + if (start + size > ident_map_size) + size = ident_map_size - start; + block_size = memory_block_size_bytes(); + align_to_block_size(&start, &size, block_size); + if (!size) + goto skip_add; + for (addr = start; addr < start + size; addr += block_size) { + id = addr / block_size; + rc = sclp_create_mem(&sclp_mems[id], kset, id, false, + mhp_supports_memmap_on_memory()); + if (rc) + break; + } +skip_add: + first_rn = rn; + num = 1; + return rc; +} + +static int __init sclp_create_standby_mems(struct sclp_mem *sclp_mems, struct kset *kset) +{ + struct memory_increment *incr; + int rc = 0; + + list_for_each_entry(incr, &sclp_mem_list, list) { + if (incr->standby) + rc = sclp_create_standby_mems_merged(sclp_mems, kset, incr->rn); + if (rc) + return rc; + } + return sclp_create_standby_mems_merged(sclp_mems, kset, 0); +} + +static int __init sclp_init_mem(void) +{ + const unsigned long block_size = memory_block_size_bytes(); + unsigned int max_sclp_mems; + struct sclp_mem *sclp_mems; + struct sclp_mem_arg arg; + struct kset *kset; + int rc; + + max_sclp_mems = roundup(sclp.rnmax * sclp.rzm, block_size) / block_size; + /* Allocate memory for all blocks ahead of time. 
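+ * One struct sclp_mem is needed per possible block: the configured
+ * and standby ranges together span rnmax increments of rzm bytes.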
*/ + sclp_mems = kcalloc(max_sclp_mems, sizeof(struct sclp_mem), GFP_KERNEL); + if (!sclp_mems) + return -ENOMEM; + kset = kset_create_and_add("memory", NULL, firmware_kobj); + if (!kset) + return -ENOMEM; + /* Initial memory is in the "configured" state already. */ + arg.sclp_mems = sclp_mems; + arg.kset = kset; + rc = for_each_memory_block(&arg, sclp_create_configured_mem); + if (rc) + return rc; + /* Standby memory is "deconfigured". */ + return sclp_create_standby_mems(sclp_mems, kset); +} + +static void __init insert_increment(u16 rn, int standby, int assigned) +{ + struct memory_increment *incr, *new_incr; + struct list_head *prev; + u16 last_rn; + + new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); + if (!new_incr) + return; + new_incr->rn = rn; + new_incr->standby = standby; + last_rn = 0; + prev = &sclp_mem_list; + list_for_each_entry(incr, &sclp_mem_list, list) { + if (assigned && incr->rn > rn) + break; + if (!assigned && incr->rn - last_rn > 1) + break; + last_rn = incr->rn; + prev = &incr->list; + } + if (!assigned) + new_incr->rn = last_rn + 1; + if (new_incr->rn > sclp.rnmax) { + kfree(new_incr); + return; + } + list_add(&new_incr->list, prev); +} + +static int __init sclp_setup_memory(void) +{ + struct read_storage_sccb *sccb; + int i, id, assigned, rc; + + /* No standby memory in kdump mode */ + if (oldmem_data.start) + return 0; + if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL) + return 0; + rc = -ENOMEM; + sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + goto out; + assigned = 0; + for (id = 0; id <= sclp_max_storage_id; id++) { + memset(sccb, 0, PAGE_SIZE); + sccb->header.length = PAGE_SIZE; + rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0010: + set_bit(id, sclp_storage_ids); + for (i = 0; i < sccb->assigned; i++) { + if (!sccb->entries[i]) + continue; + assigned++; + insert_increment(sccb->entries[i] >> 16, 0, 1); + } + break; + case 0x0310: + break; + case 0x0410: + for (i = 0; i < sccb->assigned; i++) { + if (!sccb->entries[i]) + continue; + assigned++; + insert_increment(sccb->entries[i] >> 16, 1, 1); + } + break; + default: + rc = -EIO; + break; + } + if (!rc) + sclp_max_storage_id = sccb->max_id; + } + if (rc || list_empty(&sclp_mem_list)) + goto out; + for (i = 1; i <= sclp.rnmax - assigned; i++) + insert_increment(0, 1, 0); + rc = sclp_init_mem(); +out: + free_page((unsigned long)sccb); + return rc; +} +__initcall(sclp_setup_memory); diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c index d35f10ea5b52..35f3a4a08b12 100644 --- a/drivers/s390/char/sclp_ocf.c +++ b/drivers/s390/char/sclp_ocf.c @@ -6,9 +6,9 @@ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#define KMSG_COMPONENT "sclp_ocf" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_ocf: " fmt +#include <linux/export.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> @@ -101,7 +101,7 @@ static ssize_t cpc_name_show(struct kobject *kobj, sclp_ocf_cpc_name_copy(name); name[OCF_LENGTH_CPC_NAME] = 0; EBCASC(name, OCF_LENGTH_CPC_NAME); - return snprintf(page, PAGE_SIZE, "%s\n", name); + return sysfs_emit(page, "%s\n", name); } static struct kobj_attribute cpc_name_attr = @@ -113,7 +113,7 @@ static ssize_t hmc_network_show(struct kobject *kobj, int rc; spin_lock_irq(&sclp_ocf_lock); - rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network); + rc = sysfs_emit(page, "%s\n", hmc_network); 
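+ /* sysfs_emit() is bounded to PAGE_SIZE, hence no explicit length cap */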
spin_unlock_irq(&sclp_ocf_lock); return rc; } diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c index a3e5a5fb0c1e..899063e64aef 100644 --- a/drivers/s390/char/sclp_pci.c +++ b/drivers/s390/char/sclp_pci.c @@ -4,8 +4,7 @@ * * Copyright IBM Corp. 2016 */ -#define KMSG_COMPONENT "sclp_cmd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_cmd: " fmt #include <linux/completion.h> #include <linux/export.h> @@ -24,29 +23,11 @@ #define SCLP_ATYPE_PCI 2 -#define SCLP_ERRNOTIFY_AQ_RESET 0 -#define SCLP_ERRNOTIFY_AQ_REPAIR 1 -#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2 - static DEFINE_MUTEX(sclp_pci_mutex); static struct sclp_register sclp_pci_event = { .send_mask = EVTYP_ERRNOTIFY_MASK, }; -struct err_notify_evbuf { - struct evbuf_header header; - u8 action; - u8 atype; - u32 fh; - u32 fid; - u8 data[]; -} __packed; - -struct err_notify_sccb { - struct sccb_header header; - struct err_notify_evbuf evbuf; -} __packed; - struct pci_cfg_sccb { struct sccb_header header; u8 atype; /* adapter type */ @@ -116,6 +97,7 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report) case SCLP_ERRNOTIFY_AQ_RESET: case SCLP_ERRNOTIFY_AQ_REPAIR: case SCLP_ERRNOTIFY_AQ_INFO_LOG: + case SCLP_ERRNOTIFY_AQ_OPTICS_DATA: break; default: return -EINVAL; diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c index f9e164be7568..e4a90a14028a 100644 --- a/drivers/s390/char/sclp_sd.c +++ b/drivers/s390/char/sclp_sd.c @@ -5,20 +5,18 @@ * Copyright IBM Corp. 2017 */ -#define KMSG_COMPONENT "sclp_sd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_sd: " fmt #include <linux/completion.h> +#include <linux/jiffies.h> #include <linux/kobject.h> #include <linux/list.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/async.h> -#include <linux/export.h> #include <linux/mutex.h> - -#include <asm/pgalloc.h> +#include <linux/pgalloc.h> #include "sclp.h" @@ -28,6 +26,8 @@ #define SD_DI_CONFIG 3 +#define SD_TIMEOUT msecs_to_jiffies(30000) + struct sclp_sd_evbuf { struct evbuf_header hdr; u8 eq; @@ -194,6 +194,10 @@ static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa, struct sclp_sd_evbuf *evbuf; int rc; + if (!sclp_sd_register.sclp_send_mask || + !sclp_sd_register.sclp_receive_mask) + return -EIO; + sclp_sd_listener_init(&listener, __pa(sccb)); sclp_sd_listener_add(&listener); @@ -230,9 +234,12 @@ static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa, goto out; } if (!(evbuf->rflags & 0x80)) { - rc = wait_for_completion_interruptible(&listener.completion); - if (rc) + rc = wait_for_completion_interruptible_timeout(&listener.completion, SD_TIMEOUT); + if (rc == 0) + rc = -ETIME; + if (rc < 0) goto out; + rc = 0; evbuf = &listener.evbuf; } switch (evbuf->status) { @@ -319,9 +326,15 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize, &esize); if (rc) { - /* Cancel running request if interrupted */ - if (rc == -ERESTARTSYS) - sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL); + /* Cancel running request if interrupted or timed out */ + if (rc == -ERESTARTSYS || rc == -ETIME) { + if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) { + pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n", + (size_t)dsize * PAGE_SIZE); + data = NULL; + asce = 0; + } + } vfree(data); goto out; } @@ -460,7 +473,7 @@ static struct kobj_type sclp_sd_file_ktype = { * on 
EOF. */ static ssize_t data_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buffer, + const struct bin_attribute *attr, char *buffer, loff_t off, size_t size) { struct sclp_sd_file *sd_file = to_sd_file(kobj); diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index e915a343fcf5..ab8f1b758a1a 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -6,8 +6,7 @@ * Author(s): Michael Holzheu */ -#define KMSG_COMPONENT "sclp_sdias" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sclp_sdias: " fmt #include <linux/completion.h> #include <linux/sched.h> diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 971fbb52740b..0a92d08830e7 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -48,7 +48,7 @@ static struct sclp_buffer *sclp_ttybuf; static struct timer_list sclp_tty_timer; static struct tty_port sclp_port; -static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; +static u8 sclp_tty_chars[SCLP_TTY_BUF_SIZE]; static unsigned short int sclp_tty_chars_count; struct tty_driver *sclp_tty_driver; @@ -168,7 +168,7 @@ sclp_tty_timeout(struct timer_list *unused) /* * Write a string to the sclp tty. */ -static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail) +static int sclp_tty_write_string(const u8 *str, int count, int may_fail) { unsigned long flags; void *page; @@ -229,8 +229,8 @@ out: * tty device. The characters may come from user space or kernel space. This * routine will return the number of characters actually accepted for writing. */ -static int -sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +static ssize_t +sclp_tty_write(struct tty_struct *tty, const u8 *buf, size_t count) { if (sclp_tty_chars_count > 0) { sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); @@ -250,7 +250,7 @@ sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) * sclp_write() without final '\n' - will be written. */ static int -sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) +sclp_tty_put_char(struct tty_struct *tty, u8 ch) { sclp_tty_chars[sclp_tty_chars_count++] = ch; if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { @@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = { .flush_buffer = sclp_tty_flush_buffer, }; +/* Release allocated pages. 
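+ * Each free page doubles as its own list_head: the node sits at the
+ * start of the page, so the entry pointer is exactly the address that
+ * free_page() expects.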
*/ +static void __init __sclp_tty_free_pages(void) +{ + struct list_head *page, *p; + + list_for_each_safe(page, p, &sclp_tty_pages) { + list_del(page); + free_page((unsigned long)page); + } +} + static int __init sclp_tty_init(void) { @@ -499,7 +510,7 @@ sclp_tty_init(void) int rc; /* z/VM multiplexes the line mode output on the 32xx screen */ - if (MACHINE_IS_VM && !CONSOLE_IS_SCLP) + if (machine_is_vm() && !CONSOLE_IS_SCLP) return 0; if (!sclp.has_linemode) return 0; @@ -516,6 +527,7 @@ sclp_tty_init(void) for (i = 0; i < MAX_KMEM_PAGES; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (page == NULL) { + __sclp_tty_free_pages(); tty_driver_kref_put(driver); return -ENOMEM; } @@ -524,7 +536,7 @@ sclp_tty_init(void) timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0); sclp_ttybuf = NULL; sclp_tty_buffer_count = 0; - if (MACHINE_IS_VM) { + if (machine_is_vm()) { /* case input lines to lowercase */ sclp_tty_tolower = 1; } diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index a32f34a1c6d2..62979adcb381 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -231,7 +231,7 @@ sclp_vt220_emit_current(void) list_add_tail(&sclp_vt220_current_request->list, &sclp_vt220_outqueue); sclp_vt220_current_request = NULL; - del_timer(&sclp_vt220_timer); + timer_delete(&sclp_vt220_timer); } sclp_vt220_flush_later = 0; } @@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request, buffer = (void *) ((addr_t) sccb + sccb->header.length); if (convertlf) { - /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/ + /* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/ for (from=0, to=0; (from < count) && (to < sclp_vt220_space_left(request)); from++) { @@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request, /* Perform conversion */ if (c == 0x0a) { if (to + 1 < sclp_vt220_space_left(request)) { - ((unsigned char *) buffer)[to++] = c; ((unsigned char *) buffer)[to++] = 0x0d; + ((unsigned char *) buffer)[to++] = c; } else break; @@ -462,8 +462,8 @@ out: * user space or kernel space. This routine will return the * number of characters actually accepted for writing. */ -static int -sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count) +static ssize_t +sclp_vt220_write(struct tty_struct *tty, const u8 *buf, size_t count) { return __sclp_vt220_write(buf, count, 1, 0, 1); } @@ -579,7 +579,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp) * done stuffing characters into the driver. */ static int -sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) +sclp_vt220_put_char(struct tty_struct *tty, u8 ch) { return __sclp_vt220_write(&ch, 1, 0, 0, 1); } @@ -798,7 +798,7 @@ sclp_vt220_notify(struct notifier_block *self, sclp_vt220_emit_current(); spin_lock_irqsave(&sclp_vt220_lock, flags); - del_timer(&sclp_vt220_timer); + timer_delete(&sclp_vt220_timer); while (sclp_vt220_queue_running) { spin_unlock_irqrestore(&sclp_vt220_lock, flags); sclp_sync_wait(); diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 4e5d5efa978f..3953b31b0c55 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -130,6 +130,7 @@ struct tape_request { int retries; /* retry counter for error recovery. */ int rescnt; /* residual count from devstat. */ struct timer_list timer; /* timer for std_assign_timeout(). */ + struct irb irb; /* device status */ /* Callback for delivering final status. 
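 * __tape_do_irq() stores the final irb in the request (see the irb
 * member above) before this callback runs.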
*/ void (*callback)(struct tape_request *, void *); @@ -151,8 +152,8 @@ struct tape_discipline { int (*setup_device)(struct tape_device *); void (*cleanup_device)(struct tape_device *); int (*irq)(struct tape_device *, struct tape_request *, struct irb *); - struct tape_request *(*read_block)(struct tape_device *, size_t); - struct tape_request *(*write_block)(struct tape_device *, size_t); + struct tape_request *(*read_block)(struct tape_device *); + struct tape_request *(*write_block)(struct tape_device *); void (*process_eov)(struct tape_device*); /* ioctl function for additional ioctls. */ int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); @@ -172,7 +173,7 @@ struct tape_discipline { /* Char Frontend Data */ struct tape_char_data { - struct idal_buffer *idal_buf; /* idal buffer for user char data */ + struct idal_buffer **ibs; /* idal buffer array for user char data */ int block_size; /* of size block_size. */ }; @@ -234,6 +235,7 @@ struct tape_device { /* Externals from tape_core.c */ extern struct tape_request *tape_alloc_request(int cplength, int datasize); extern void tape_free_request(struct tape_request *); +extern int tape_check_idalbuffer(struct tape_device *device, size_t size); extern int tape_do_io(struct tape_device *, struct tape_request *); extern int tape_do_io_async(struct tape_device *, struct tape_request *); extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); @@ -305,7 +307,9 @@ tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda) ccw->cmd_code = cmd_code; ccw->flags = CCW_FLAG_CC; ccw->count = memsize; - ccw->cda = (__u32)(addr_t) cda; + ccw->cda = 0; + if (cda) + ccw->cda = virt_to_dma32(cda); return ccw + 1; } @@ -315,7 +319,9 @@ tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda) ccw->cmd_code = cmd_code; ccw->flags = 0; ccw->count = memsize; - ccw->cda = (__u32)(addr_t) cda; + ccw->cda = 0; + if (cda) + ccw->cda = virt_to_dma32(cda); return ccw + 1; } @@ -325,7 +331,7 @@ tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code) ccw->cmd_code = cmd_code; ccw->flags = 0; ccw->count = 0; - ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + ccw->cda = virt_to_dma32(&ccw->cmd_code); return ccw + 1; } @@ -336,19 +342,28 @@ tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count) ccw->cmd_code = cmd_code; ccw->flags = CCW_FLAG_CC; ccw->count = 0; - ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + ccw->cda = virt_to_dma32(&ccw->cmd_code); ccw++; } return ccw; } static inline struct ccw1 * +tape_ccw_dc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_DC; + idal_buffer_set_cda(idal, ccw); + return ccw + 1; +} + +static inline struct ccw1 * tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal) { ccw->cmd_code = cmd_code; ccw->flags = CCW_FLAG_CC; idal_buffer_set_cda(idal, ccw); - return ccw++; + return ccw + 1; } static inline struct ccw1 * @@ -357,7 +372,7 @@ tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal) ccw->cmd_code = cmd_code; ccw->flags = 0; idal_buffer_set_cda(idal, ccw); - return ccw++; + return ccw + 1; } /* Global vars */ diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 751945fb6793..a13e0ac1a4e2 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -8,9 +8,9 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#define KMSG_COMPONENT "tape_34xx" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) 
"tape_34xx: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> @@ -233,31 +233,6 @@ tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) return TAPE_IO_SUCCESS; } -/* - * Read Opposite Error Recovery Function: - * Used, when Read Forward does not work - */ -static int -tape_34xx_erp_read_opposite(struct tape_device *device, - struct tape_request *request) -{ - if (request->op == TO_RFO) { - /* - * We did read forward, but the data could not be read - * *correctly*. We transform the request to a read backward - * and try again. - */ - tape_std_read_backward(device, request); - return tape_34xx_erp_retry(request); - } - - /* - * We tried to read forward and backward, but hat no - * success -> failed. - */ - return tape_34xx_erp_failed(request, -EIO); -} - static int tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request, struct irb *irb, int no) @@ -439,9 +414,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, dev_warn (&device->cdev->dev, "A write error on the " "tape cannot be recovered\n"); return tape_34xx_erp_failed(request, -EIO); - case 0x26: - /* Data Check (read opposite) occurred. */ - return tape_34xx_erp_read_opposite(device, request); case 0x28: /* ID-Mark at tape start couldn't be written */ dev_warn (&device->cdev->dev, "Writing the ID-mark " diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 0d484fe43d7e..0d80f43b175d 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -8,9 +8,9 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#define KMSG_COMPONENT "tape_3590" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape_3590: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> @@ -550,31 +550,6 @@ tape_3590_mtseek(struct tape_device *device, int count) } /* - * Read Opposite Error Recovery Function: - * Used, when Read Forward does not work - */ -static void -tape_3590_read_opposite(struct tape_device *device, - struct tape_request *request) -{ - struct tape_3590_disc_data *data; - - /* - * We have allocated 4 ccws in tape_std_read, so we can now - * transform the request to a read backward, followed by a - * forward space block. - */ - request->op = TO_RBA; - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - data = device->discdata; - tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, - device->char_data.idal_buf); - tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); - tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); - DBF_EVENT(6, "xrop ccwg\n"); -} - -/* * Read Attention Msg * This should be done after an interrupt with attention bit (0x80) * in device state. 
@@ -896,60 +871,6 @@ tape_3590_erp_special_interrupt(struct tape_device *device, } /* - * RDA: Read Alternate - */ -static int -tape_3590_erp_read_alternate(struct tape_device *device, - struct tape_request *request, struct irb *irb) -{ - struct tape_3590_disc_data *data; - - /* - * The issued Read Backward or Read Previous command is not - * supported by the device - * The recovery action should be to issue another command: - * Read Revious: if Read Backward is not supported - * Read Backward: if Read Previous is not supported - */ - data = device->discdata; - if (data->read_back_op == READ_PREVIOUS) { - DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", - device->cdev_id); - data->read_back_op = READ_BACKWARD; - } else { - DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", - device->cdev_id); - data->read_back_op = READ_PREVIOUS; - } - tape_3590_read_opposite(device, request); - return tape_3590_erp_retry(device, request, irb); -} - -/* - * Error Recovery read opposite - */ -static int -tape_3590_erp_read_opposite(struct tape_device *device, - struct tape_request *request, struct irb *irb) -{ - switch (request->op) { - case TO_RFO: - /* - * We did read forward, but the data could not be read. - * We will read backward and then skip forward again. - */ - tape_3590_read_opposite(device, request); - return tape_3590_erp_retry(device, request, irb); - case TO_RBA: - /* We tried to read forward and backward, but hat no success */ - return tape_3590_erp_failed(device, request, irb, -EIO); - break; - default: - return tape_3590_erp_failed(device, request, irb, -EIO); - } -} - -/* * Print an MIM (Media Information Message) (message code f0) */ static void @@ -1347,10 +1268,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_buf_log(device, request, irb); - case 0x2011: - tape_3590_print_era_msg(device, irb); - return tape_3590_erp_read_alternate(device, request, irb); - case 0x2230: case 0x2231: tape_3590_print_era_msg(device, irb); @@ -1404,12 +1321,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, tape_3590_print_era_msg(device, irb); return tape_3590_erp_swap(device, request, irb); } - if (sense->rac == 0x26) { - /* Read Opposite */ - tape_3590_print_era_msg(device, irb); - return tape_3590_erp_read_opposite(device, request, - irb); - } return tape_3590_erp_basic(device, request, irb, -EIO); case 0x5020: case 0x5021: @@ -1670,7 +1581,7 @@ tape_3590_init(void) DBF_EVENT(3, "3590 init\n"); - tape_3590_wq = alloc_workqueue("tape_3590", 0, 0); + tape_3590_wq = alloc_workqueue("tape_3590", WQ_PERCPU, 0); if (!tape_3590_wq) return -ENOMEM; diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index cc8237afeffa..c5d3c303c15c 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -10,14 +10,12 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape: " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/mtio.h> -#include <linux/compat.h> #include <linux/uaccess.h> @@ -37,9 +35,6 @@ static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t static int tapechar_open(struct inode *,struct file *); static int tapechar_release(struct inode *,struct file *); static long tapechar_ioctl(struct file *, unsigned int, unsigned long); -#ifdef 
CONFIG_COMPAT -static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long); -#endif static const struct file_operations tape_fops = { @@ -47,12 +42,8 @@ static const struct file_operations tape_fops = .read = tapechar_read, .write = tapechar_write, .unlocked_ioctl = tapechar_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = tapechar_compat_ioctl, -#endif .open = tapechar_open, .release = tapechar_release, - .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; @@ -65,7 +56,7 @@ tapechar_setup_device(struct tape_device * device) { char device_name[20]; - sprintf(device_name, "ntibm%i", device->first_minor / 2); + scnprintf(device_name, sizeof(device_name), "ntibm%i", device->first_minor / 2); device->nt = register_tape_dev( &device->cdev->dev, MKDEV(tapechar_major, device->first_minor), @@ -94,33 +85,6 @@ tapechar_cleanup_device(struct tape_device *device) device->nt = NULL; } -static int -tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) -{ - struct idal_buffer *new; - - if (device->char_data.idal_buf != NULL && - device->char_data.idal_buf->size == block_size) - return 0; - - if (block_size > MAX_BLOCKSIZE) { - DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", - block_size, MAX_BLOCKSIZE); - return -EINVAL; - } - - /* The current idal buffer is not correct. Allocate a new one. */ - new = idal_buffer_alloc(block_size, 0); - if (IS_ERR(new)) - return -ENOMEM; - - if (device->char_data.idal_buf != NULL) - idal_buffer_free(device->char_data.idal_buf); - - device->char_data.idal_buf = new; - - return 0; -} /* * Tape device read function @@ -128,9 +92,12 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) static ssize_t tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) { - struct tape_device *device; struct tape_request *request; + struct ccw1 *ccw, *last_ccw; + struct tape_device *device; + struct idal_buffer **ibs; size_t block_size; + size_t read = 0; int rc; DBF_EVENT(6, "TCHAR:read\n"); @@ -157,24 +124,37 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) block_size = count; } - rc = tapechar_check_idalbuffer(device, block_size); + rc = tape_check_idalbuffer(device, block_size); if (rc) return rc; DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); /* Let the discipline build the ccw chain. */ - request = device->discipline->read_block(device, block_size); + request = device->discipline->read_block(device); if (IS_ERR(request)) return PTR_ERR(request); /* Execute it. */ rc = tape_do_io(device, request); if (rc == 0) { - rc = block_size - request->rescnt; DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); - /* Copy data from idal buffer to user space. */ - if (idal_buffer_to_user(device->char_data.idal_buf, - data, rc) != 0) - rc = -EFAULT; + /* Channel Program Address (cpa) points to last CCW + 8 */ + last_ccw = dma32_to_virt(request->irb.scsw.cmd.cpa); + ccw = request->cpaddr; + ibs = device->char_data.ibs; + while (++ccw < last_ccw) { + /* Copy data from idal buffer to user space. 
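+ * One idal buffer was queued per data CCW, so the buffer array and
+ * the CCW chain are walked in lock step, ccw->count bytes at a time.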
*/ + if (idal_buffer_to_user(*ibs++, data, ccw->count) != 0) { + rc = -EFAULT; + break; + } + read += ccw->count; + data += ccw->count; + } + if (&last_ccw[-1] == &request->cpaddr[1] && + request->rescnt == last_ccw[-1].count) + rc = 0; + else + rc = read - request->rescnt; } tape_free_request(request); return rc; @@ -186,10 +166,12 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) static ssize_t tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) { - struct tape_device *device; struct tape_request *request; + struct ccw1 *ccw, *last_ccw; + struct tape_device *device; + struct idal_buffer **ibs; + size_t written = 0; size_t block_size; - size_t written; int nblocks; int i, rc; @@ -209,35 +191,45 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t nblocks = 1; } - rc = tapechar_check_idalbuffer(device, block_size); + rc = tape_check_idalbuffer(device, block_size); if (rc) return rc; - DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); + DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); /* Let the discipline build the ccw chain. */ - request = device->discipline->write_block(device, block_size); + request = device->discipline->write_block(device); if (IS_ERR(request)) return PTR_ERR(request); - rc = 0; - written = 0; + for (i = 0; i < nblocks; i++) { - /* Copy data from user space to idal buffer. */ - if (idal_buffer_from_user(device->char_data.idal_buf, - data, block_size)) { - rc = -EFAULT; - break; + size_t wbytes = 0; /* Used to trace written data in dbf */ + + ibs = device->char_data.ibs; + while (ibs && *ibs) { + if (idal_buffer_from_user(*ibs, data, (*ibs)->size)) { + rc = -EFAULT; + goto out; + } + data += (*ibs)->size; + ibs++; } rc = tape_do_io(device, request); if (rc) - break; - DBF_EVENT(6, "TCHAR:wbytes: %lx\n", - block_size - request->rescnt); - written += block_size - request->rescnt; + goto out; + + /* Channel Program Address (cpa) points to last CCW + 8 */ + last_ccw = dma32_to_virt(request->irb.scsw.cmd.cpa); + ccw = request->cpaddr; + while (++ccw < last_ccw) + wbytes += ccw->count; + DBF_EVENT(6, "TCHAR:wbytes: %lx\n", wbytes - request->rescnt); + written += wbytes - request->rescnt; if (request->rescnt != 0) break; - data += block_size; } + +out: tape_free_request(request); if (rc == -ENOSPC) { /* @@ -325,10 +317,8 @@ tapechar_release(struct inode *inode, struct file *filp) } } - if (device->char_data.idal_buf != NULL) { - idal_buffer_free(device->char_data.idal_buf); - device->char_data.idal_buf = NULL; - } + if (device->char_data.ibs) + idal_buffer_array_free(&device->char_data.ibs); tape_release(device); filp->private_data = NULL; tape_put_device(device); @@ -443,25 +433,6 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data) return rc; } -#ifdef CONFIG_COMPAT -static long -tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) -{ - struct tape_device *device = filp->private_data; - long rc; - - if (no == MTIOCPOS32) - no = MTIOCPOS; - else if (no == MTIOCGET32) - no = MTIOCGET; - - mutex_lock(&device->mutex); - rc = __tapechar_ioctl(device, no, compat_ptr(data)); - mutex_unlock(&device->mutex); - return rc; -} -#endif /* CONFIG_COMPAT */ - /* * Initialize character device frontend. 
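 * Registers the character device and wires up the tape_fops above.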
*/ diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index 277a0f903d11..6fa7b7824856 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -8,9 +8,9 @@ * Based on simple class device code by Greg K-H */ -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape: " fmt +#include <linux/export.h> #include <linux/slab.h> #include "tape_class.h" @@ -22,7 +22,9 @@ MODULE_DESCRIPTION( ); MODULE_LICENSE("GPL"); -static struct class *tape_class; +static const struct class tape_class = { + .name = "tape390", +}; /* * Register a tape device and return a pointer to the cdev structure. @@ -74,7 +76,7 @@ struct tape_class_device *register_tape_dev( if (rc) goto fail_with_cdev; - tcd->class_device = device_create(tape_class, device, + tcd->class_device = device_create(&tape_class, device, tcd->char_device->dev, NULL, "%s", tcd->device_name); rc = PTR_ERR_OR_ZERO(tcd->class_device); @@ -91,7 +93,7 @@ struct tape_class_device *register_tape_dev( return tcd; fail_with_class_device: - device_destroy(tape_class, tcd->char_device->dev); + device_destroy(&tape_class, tcd->char_device->dev); fail_with_cdev: cdev_del(tcd->char_device); @@ -107,7 +109,7 @@ void unregister_tape_dev(struct device *device, struct tape_class_device *tcd) { if (tcd != NULL && !IS_ERR(tcd)) { sysfs_remove_link(&device->kobj, tcd->mode_name); - device_destroy(tape_class, tcd->char_device->dev); + device_destroy(&tape_class, tcd->char_device->dev); cdev_del(tcd->char_device); kfree(tcd); } @@ -117,15 +119,12 @@ EXPORT_SYMBOL(unregister_tape_dev); static int __init tape_init(void) { - tape_class = class_create("tape390"); - - return 0; + return class_register(&tape_class); } static void __exit tape_exit(void) { - class_destroy(tape_class); - tape_class = NULL; + class_unregister(&tape_class); } postcore_initcall(tape_init); diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index a6d2a4792185..0250076a7d9f 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -11,9 +11,9 @@ * Stefan Bader <shbader@de.ibm.com> */ -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> // for kernel parameters #include <linux/kmod.h> // for requesting modules @@ -96,7 +96,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char * struct tape_device *tdev; tdev = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state); + return sysfs_emit(buf, "%i\n", tdev->medium_state); } static @@ -108,7 +108,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b struct tape_device *tdev; tdev = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); + return sysfs_emit(buf, "%i\n", tdev->first_minor); } static @@ -120,8 +120,8 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) struct tape_device *tdev; tdev = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? - "OFFLINE" : tape_state_verbose[tdev->tape_state]); + return sysfs_emit(buf, "%s\n", (tdev->first_minor < 0) ? 
+ "OFFLINE" : tape_state_verbose[tdev->tape_state]); } static @@ -135,17 +135,17 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf tdev = dev_get_drvdata(dev); if (tdev->first_minor < 0) - return scnprintf(buf, PAGE_SIZE, "N/A\n"); + return sysfs_emit(buf, "N/A\n"); spin_lock_irq(get_ccwdev_lock(tdev->cdev)); if (list_empty(&tdev->req_queue)) - rc = scnprintf(buf, PAGE_SIZE, "---\n"); + rc = sysfs_emit(buf, "---\n"); else { struct tape_request *req; req = list_entry(tdev->req_queue.next, struct tape_request, list); - rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); + rc = sysfs_emit(buf, "%s\n", tape_op_verbose[req->op]); } spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); return rc; @@ -161,7 +161,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf tdev = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); + return sysfs_emit(buf, "%i\n", tdev->char_data.block_size); } static @@ -725,6 +725,36 @@ tape_free_request (struct tape_request * request) kfree(request); } +int +tape_check_idalbuffer(struct tape_device *device, size_t size) +{ + struct idal_buffer **new; + size_t old_size = 0; + + old_size = idal_buffer_array_datasize(device->char_data.ibs); + if (old_size == size) + return 0; + + if (size > MAX_BLOCKSIZE) { + DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", + size, MAX_BLOCKSIZE); + return -EINVAL; + } + + /* The current idal buffer is not correct. Allocate a new one. */ + new = idal_buffer_array_alloc(size, 0); + if (IS_ERR(new)) + return -ENOMEM; + + /* Free old idal buffer array */ + if (device->char_data.ibs) + idal_buffer_array_free(&device->char_data.ibs); + + device->char_data.ibs = new; + + return 0; +} + static int __tape_start_io(struct tape_device *device, struct tape_request *request) { @@ -821,7 +851,7 @@ tape_delayed_next_request(struct work_struct *work) static void tape_long_busy_timeout(struct timer_list *t) { - struct tape_device *device = from_timer(device, t, lb_timeout); + struct tape_device *device = timer_container_of(device, t, lb_timeout); struct tape_request *request; spin_lock_irq(get_ccwdev_lock(device->cdev)); @@ -1098,9 +1128,10 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* May be an unsolicited irq */ - if(request != NULL) + if (request != NULL) { request->rescnt = irb->scsw.cmd.count; - else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && + memcpy(&request->irb, irb, sizeof(*irb)); + } else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && !list_empty(&device->req_queue)) { /* Not Ready to Ready after long busy ? 
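 * A device end (dstat 0x80/0x85) clears the long-busy state: cancel
 * the timeout timer and restart the queued request.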
*/ struct tape_request *req; @@ -1108,7 +1139,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) struct tape_request, list); if (req->status == TAPE_REQUEST_LONG_BUSY) { DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); - if (del_timer(&device->lb_timeout)) { + if (timer_delete(&device->lb_timeout)) { tape_put_device(device); __tape_start_next_request(device); } diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c index 2238d9df6c47..a1e5fab12af2 100644 --- a/drivers/s390/char/tape_proc.c +++ b/drivers/s390/char/tape_proc.c @@ -11,8 +11,7 @@ * PROCFS Functions */ -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape: " fmt #include <linux/module.h> #include <linux/vmalloc.h> diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index f7e75d9fedf6..43a5586685ff 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -11,9 +11,9 @@ * Stefan Bader <shbader@de.ibm.com> */ -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "tape: " fmt +#include <linux/export.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/bio.h> @@ -35,7 +35,8 @@ static void tape_std_assign_timeout(struct timer_list *t) { - struct tape_request * request = from_timer(request, t, timer); + struct tape_request * request = timer_container_of(request, t, + timer); struct tape_device * device = request->device; int rc; @@ -73,7 +74,7 @@ tape_std_assign(struct tape_device *device) rc = tape_do_io_interruptible(device, request); - del_timer_sync(&request->timer); + timer_delete_sync(&request->timer); if (rc != 0) { DBF_EVENT(3, "%08x: assign failed - device might be busy\n", @@ -210,7 +211,7 @@ tape_std_mtload(struct tape_device *device, int count) int tape_std_mtsetblk(struct tape_device *device, int count) { - struct idal_buffer *new; + int rc; DBF_LH(6, "tape_std_mtsetblk(%d)\n", count); if (count <= 0) { @@ -222,26 +223,12 @@ tape_std_mtsetblk(struct tape_device *device, int count) device->char_data.block_size = 0; return 0; } - if (device->char_data.idal_buf != NULL && - device->char_data.idal_buf->size == count) - /* We already have a idal buffer of that size. */ - return 0; - if (count > MAX_BLOCKSIZE) { - DBF_EVENT(3, "Invalid block size (%d > %d) given.\n", - count, MAX_BLOCKSIZE); - return -EINVAL; - } + rc = tape_check_idalbuffer(device, count); + if (rc) + return rc; - /* Allocate a new idal buffer. */ - new = idal_buffer_alloc(count, 0); - if (IS_ERR(new)) - return -ENOMEM; - if (device->char_data.idal_buf != NULL) - idal_buffer_free(device->char_data.idal_buf); - device->char_data.idal_buf = new; device->char_data.block_size = count; - DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size); return 0; @@ -639,63 +626,54 @@ tape_std_mtcompression(struct tape_device *device, int mt_count) * Read Block */ struct tape_request * -tape_std_read_block(struct tape_device *device, size_t count) +tape_std_read_block(struct tape_device *device) { struct tape_request *request; + struct idal_buffer **ibs; + struct ccw1 *ccw; + size_t count; - /* - * We have to alloc 4 ccws in order to be able to transform request - * into a read backward request in error case. 
- */ - request = tape_alloc_request(4, 0); + ibs = device->char_data.ibs; + count = idal_buffer_array_size(ibs); + request = tape_alloc_request(count + 1 /* MODE_SET_DB */, 0); if (IS_ERR(request)) { DBF_EXCEPTION(6, "xrbl fail"); return request; } request->op = TO_RFO; - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD, - device->char_data.idal_buf); + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + while (count-- > 1) + ccw = tape_ccw_dc_idal(ccw, READ_FORWARD, *ibs++); + tape_ccw_end_idal(ccw, READ_FORWARD, *ibs); + DBF_EVENT(6, "xrbl ccwg\n"); return request; } /* - * Read Block backward transformation function. - */ -void -tape_std_read_backward(struct tape_device *device, struct tape_request *request) -{ - /* - * We have allocated 4 ccws in tape_std_read, so we can now - * transform the request to a read backward, followed by a - * forward space block. - */ - request->op = TO_RBA; - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD, - device->char_data.idal_buf); - tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); - tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); - DBF_EVENT(6, "xrop ccwg");} - -/* * Write Block */ struct tape_request * -tape_std_write_block(struct tape_device *device, size_t count) +tape_std_write_block(struct tape_device *device) { struct tape_request *request; + struct idal_buffer **ibs; + struct ccw1 *ccw; + size_t count; - request = tape_alloc_request(2, 0); + count = idal_buffer_array_size(device->char_data.ibs); + request = tape_alloc_request(count + 1 /* MODE_SET_DB */, 0); if (IS_ERR(request)) { DBF_EXCEPTION(6, "xwbl fail\n"); return request; } request->op = TO_WRI; - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD, - device->char_data.idal_buf); + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + ibs = device->char_data.ibs; + while (count-- > 1) + ccw = tape_ccw_dc_idal(ccw, WRITE_CMD, *ibs++); + tape_ccw_end_idal(ccw, WRITE_CMD, *ibs); + DBF_EVENT(6, "xwbl ccwg\n"); return request; } @@ -739,6 +717,5 @@ EXPORT_SYMBOL(tape_std_mterase); EXPORT_SYMBOL(tape_std_mtunload); EXPORT_SYMBOL(tape_std_mtcompression); EXPORT_SYMBOL(tape_std_read_block); -EXPORT_SYMBOL(tape_std_read_backward); EXPORT_SYMBOL(tape_std_write_block); EXPORT_SYMBOL(tape_std_process_eov); diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h index dcc63ff587f9..2cf9f725b3b3 100644 --- a/drivers/s390/char/tape_std.h +++ b/drivers/s390/char/tape_std.h @@ -14,10 +14,9 @@ #include <asm/tape390.h> /* - * Biggest block size to handle. Currently 64K because we only build - * channel programs without data chaining. + * Biggest block size to handle is 256K; blocks are split over an + * idal buffer array with data-chained channel programs. */ -#define MAX_BLOCKSIZE 65535 +#define MAX_BLOCKSIZE 262144 /* * The CCW commands for the Tape type of command. */ @@ -97,10 +96,10 @@ #define SENSE_TAPE_POSITIONING 0x01 /* discipline functions */ -struct tape_request *tape_std_read_block(struct tape_device *, size_t); +struct tape_request *tape_std_read_block(struct tape_device *); void tape_std_read_backward(struct tape_device *device, struct tape_request *request); -struct tape_request *tape_std_write_block(struct tape_device *, size_t); +struct tape_request *tape_std_write_block(struct tape_device *); /* Some non-mtop commands.
*/ int tape_std_assign(struct tape_device *); diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c index 144cd2e03590..2b83fb6dc1d7 100644 --- a/drivers/s390/char/uvdevice.c +++ b/drivers/s390/char/uvdevice.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright IBM Corp. 2022 + * Copyright IBM Corp. 2022, 2024 * Author(s): Steffen Eiden <seiden@linux.ibm.com> * * This file provides a Linux misc device to give userspace access to some @@ -40,6 +40,7 @@ static const u32 ioctl_nr_to_uvc_bit[] __initconst = { [UVIO_IOCTL_ADD_SECRET_NR] = BIT_UVC_CMD_ADD_SECRET, [UVIO_IOCTL_LIST_SECRETS_NR] = BIT_UVC_CMD_LIST_SECRETS, [UVIO_IOCTL_LOCK_SECRETS_NR] = BIT_UVC_CMD_LOCK_SECRETS, + [UVIO_IOCTL_RETR_SECRET_NR] = BIT_UVC_CMD_RETR_SECRET, }; static_assert(ARRAY_SIZE(ioctl_nr_to_uvc_bit) == UVIO_IOCTL_NUM_IOCTLS); @@ -62,11 +63,13 @@ static void __init set_supp_uv_cmds(unsigned long *supp_uv_cmds) } /** - * uvio_uvdev_info() - get information about the uvdevice + * uvio_uvdev_info() - Get information about the uvdevice * * @uv_ioctl: ioctl control block * * Lists all IOCTLs that are supported by this uvdevice + * + * Return: 0 on success or a negative error code on error */ static int uvio_uvdev_info(struct uvio_ioctl_cb *uv_ioctl) { @@ -109,6 +112,7 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest, struct uvio_attest *uvio_attest) { struct uvio_attest __user *user_uvio_attest = (void __user *)uv_ioctl->argument_addr; + u32 __user *user_buf_add_len = (u32 __user *)&user_uvio_attest->add_data_len; void __user *user_buf_add = (void __user *)uvio_attest->add_data_addr; void __user *user_buf_meas = (void __user *)uvio_attest->meas_addr; void __user *user_buf_uid = &user_uvio_attest->config_uid; @@ -117,6 +121,8 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest, return -EFAULT; if (add_data && copy_to_user(user_buf_add, add_data, uvio_attest->add_data_len)) return -EFAULT; + if (put_user(uvio_attest->add_data_len, user_buf_add_len)) + return -EFAULT; if (copy_to_user(user_buf_uid, uvcb_attest->config_uid, sizeof(uvcb_attest->config_uid))) return -EFAULT; return 0; @@ -174,7 +180,7 @@ static int get_uvio_attest(struct uvio_ioctl_cb *uv_ioctl, struct uvio_attest *u * * Context: might sleep * - * Return: 0 on success or a negative error code on error. + * Return: 0 on success or a negative error code on error */ static int uvio_attestation(struct uvio_ioctl_cb *uv_ioctl) { @@ -234,7 +240,8 @@ out: return ret; } -/** uvio_add_secret() - perform an Add Secret UVC +/** + * uvio_add_secret() - Perform an Add Secret UVC * * @uv_ioctl: ioctl control block * @@ -257,7 +264,7 @@ out: * * Context: might sleep * - * Return: 0 on success or a negative error code on error. + * Return: 0 on success or a negative error code on error */ static int uvio_add_secret(struct uvio_ioctl_cb *uv_ioctl) { @@ -293,7 +300,44 @@ out: return ret; } -/** uvio_list_secrets() - perform a List Secret UVC +/* + * Do the actual secret list creation. Calls the list secrets UVC until there + * is no more space in the user buffer, or the list ends.
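The ioctl_nr_to_uvc_bit[] table at the top of this file drives feature discovery: at init time the driver advertises only those ioctls whose underlying Ultravisor call the firmware reports as available, which is why each new ioctl (such as the retrieve-secret one) must add its matching UVC bit. A generic sketch of the pattern; the array contents and sizes here are illustrative, not the uvdevice values:

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_NUM_IOCTLS 4

/* For each ioctl number, the firmware facility bit it depends on. */
static const u32 demo_ioctl_to_fw_bit[DEMO_NUM_IOCTLS] = { 3, 5, 8, 13 };

static void demo_set_supported(const unsigned long *fw_facilities,
			       unsigned long *supp_ioctls)
{
	int i;

	bitmap_zero(supp_ioctls, DEMO_NUM_IOCTLS);
	for (i = 0; i < DEMO_NUM_IOCTLS; i++) {
		if (test_bit(demo_ioctl_to_fw_bit[i], fw_facilities))
			set_bit(i, supp_ioctls);
	}
}
```

Userspace can then query the resulting bitmap once, via the info ioctl, instead of probing each command and interpreting firmware return codes.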
+ */ +static int uvio_get_list(void *zpage, struct uvio_ioctl_cb *uv_ioctl) +{ + const size_t data_off = offsetof(struct uv_secret_list, secrets); + u8 __user *user_buf = (u8 __user *)uv_ioctl->argument_addr; + struct uv_secret_list *list = zpage; + u16 num_secrets_stored = 0; + size_t user_off = data_off; + size_t copy_len; + + do { + uv_list_secrets(list, list->next_secret_idx, &uv_ioctl->uv_rc, + &uv_ioctl->uv_rrc); + if (uv_ioctl->uv_rc != UVC_RC_EXECUTED && + uv_ioctl->uv_rc != UVC_RC_MORE_DATA) + break; + + copy_len = sizeof(list->secrets[0]) * list->num_secr_stored; + if (copy_to_user(user_buf + user_off, list->secrets, copy_len)) + return -EFAULT; + + user_off += copy_len; + num_secrets_stored += list->num_secr_stored; + } while (uv_ioctl->uv_rc == UVC_RC_MORE_DATA && + user_off + sizeof(*list) <= uv_ioctl->argument_len); + + list->num_secr_stored = num_secrets_stored; + if (copy_to_user(user_buf, list, data_off)) + return -EFAULT; + return 0; +} + +/** + * uvio_list_secrets() - Perform a List Secret UVC + * * @uv_ioctl: ioctl control block * * uvio_list_secrets() performs the List Secret Ultravisor Call. It verifies @@ -304,45 +348,43 @@ out: * * The argument specifies the location for the result of the UV-Call. * + * Argument length must be a multiple of a page. + * The list secrets IOCTL will call the list UVC multiple times and fill + * the provided user-buffer with list elements until either the list ends or + * the buffer is full. The list header is merged over all list header from the + * individual UVCs. + * * If the List Secrets UV facility is not present, UV will return invalid * command rc. This won't be fenced in the driver and does not result in a * negative return value. * * Context: might sleep * - * Return: 0 on success or a negative error code on error. + * Return: 0 on success or a negative error code on error */ static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl) { - void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr; - struct uv_cb_guest_addr uvcb = { - .header.len = sizeof(uvcb), - .header.cmd = UVC_CMD_LIST_SECRETS, - }; - void *secrets = NULL; - int ret = 0; + void *zpage; + int rc; - if (uv_ioctl->argument_len != UVIO_LIST_SECRETS_LEN) + if (uv_ioctl->argument_len == 0 || + uv_ioctl->argument_len % UVIO_LIST_SECRETS_LEN != 0) return -EINVAL; - secrets = kvzalloc(UVIO_LIST_SECRETS_LEN, GFP_KERNEL); - if (!secrets) + zpage = (void *)get_zeroed_page(GFP_KERNEL); + if (!zpage) return -ENOMEM; - uvcb.addr = (u64)secrets; - uv_call_sched(0, (u64)&uvcb); - uv_ioctl->uv_rc = uvcb.header.rc; - uv_ioctl->uv_rrc = uvcb.header.rrc; - - if (copy_to_user(user_buf_arg, secrets, UVIO_LIST_SECRETS_LEN)) - ret = -EFAULT; + rc = uvio_get_list(zpage, uv_ioctl); - kvfree(secrets); - return ret; + free_page((unsigned long)zpage); + return rc; } -/** uvio_lock_secrets() - perform a Lock Secret Store UVC - * @uv_ioctl: ioctl control block +/** + * uvio_lock_secrets() - Perform a Lock Secret Store UVC + * + * @ioctl: ioctl control block * * uvio_lock_secrets() performs the Lock Secret Store Ultravisor Call. It * performs the UV-call and copies the return codes to the ioctl control block. @@ -357,7 +399,7 @@ static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl) * * Context: might sleep * - * Return: 0 on success or a negative error code on error. 
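uvio_get_list() above is a standard pattern for exporting a firmware-paginated list through one flat user buffer: keep querying while the firmware reports more data and the buffer still has room for a full reply, append only the entry payloads, and write a single merged header at the front once the loop ends. A self-contained user-space rendering of the same loop; every type and the fake query function are stand-ins, not the kernel's uv_* API:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RC_DONE      0x0001	/* stand-in for UVC_RC_EXECUTED */
#define RC_MORE_DATA 0x0100	/* stand-in for UVC_RC_MORE_DATA */

struct demo_list {
	uint16_t num_stored;	/* entries valid in this reply */
	uint16_t next_idx;	/* index to continue the query at */
	uint32_t entries[4];
};

/* Fake firmware call: serves 10 entries, at most 4 per reply. */
static int demo_query(struct demo_list *out, uint16_t start)
{
	uint16_t i, total = 10;

	out->num_stored = 0;
	for (i = start; i < total && out->num_stored < 4; i++)
		out->entries[out->num_stored++] = i;
	out->next_idx = i;
	return i < total ? RC_MORE_DATA : RC_DONE;
}

/* Caller guarantees buf_len >= hdr_len + sizeof(struct demo_list). */
static int demo_collect(uint8_t *buf, size_t buf_len)
{
	const size_t hdr_len = offsetof(struct demo_list, entries);
	struct demo_list reply = { 0 };
	uint16_t total_stored = 0;
	size_t off = hdr_len, len;
	int rc;

	do {
		rc = demo_query(&reply, reply.next_idx);
		if (rc != RC_DONE && rc != RC_MORE_DATA)
			return -1;
		len = sizeof(reply.entries[0]) * reply.num_stored;
		memcpy(buf + off, reply.entries, len);
		off += len;
		total_stored += reply.num_stored;
	} while (rc == RC_MORE_DATA && off + sizeof(reply) <= buf_len);

	/* One merged header covering everything that was copied. */
	reply.num_stored = total_stored;
	memcpy(buf, &reply, hdr_len);
	return 0;
}
```

The merged header is what lets the kernel relax the buffer contract to any multiple of a page without changing the record format userspace parses.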
+ * Return: 0 on success or a negative error code on error */ static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl) { @@ -376,6 +418,59 @@ static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl) return 0; } +/** + * uvio_retr_secret() - Perform a retrieve secret UVC + * + * @uv_ioctl: ioctl control block. + * + * uvio_retr_secret() performs the Retrieve Secret Ultravisor Call. + * The first two bytes of the argument specify the index of the secret to be + * retrieved. The retrieved secret is copied into the argument buffer if there + * is enough space. + * The argument length must be at least two bytes and at max 8192 bytes. + * + * Context: might sleep + * + * Return: 0 on success or a negative error code on error + */ +static int uvio_retr_secret(struct uvio_ioctl_cb *uv_ioctl) +{ + u16 __user *user_index = (u16 __user *)uv_ioctl->argument_addr; + struct uv_cb_retr_secr uvcb = { + .header.len = sizeof(uvcb), + .header.cmd = UVC_CMD_RETR_SECRET, + }; + u32 buf_len = uv_ioctl->argument_len; + void *buf = NULL; + int ret; + + if (buf_len > UVIO_RETR_SECRET_MAX_LEN || buf_len < sizeof(*user_index)) + return -EINVAL; + + buf = kvzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = -EFAULT; + if (get_user(uvcb.secret_idx, user_index)) + goto err; + + uvcb.buf_addr = (u64)buf; + uvcb.buf_size = buf_len; + uv_call_sched(0, (u64)&uvcb); + + if (copy_to_user((__user void *)uv_ioctl->argument_addr, buf, buf_len)) + goto err; + + ret = 0; + uv_ioctl->uv_rc = uvcb.header.rc; + uv_ioctl->uv_rrc = uvcb.header.rrc; + +err: + kvfree_sensitive(buf, buf_len); + return ret; +} + static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp, unsigned long cmd) { @@ -429,6 +524,9 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case UVIO_IOCTL_LOCK_SECRETS_NR: ret = uvio_lock_secrets(&uv_ioctl); break; + case UVIO_IOCTL_RETR_SECRET_NR: + ret = uvio_retr_secret(&uv_ioctl); + break; default: ret = -ENOIOCTLCMD; break; @@ -445,7 +543,6 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static const struct file_operations uvio_dev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = uvio_ioctl, - .llseek = no_llseek, }; static struct miscdevice uvio_dev_miscdev = { diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index eb0520a9d4af..bde6c9e59166 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -14,15 +14,14 @@ #include <linux/fs.h> #include <linux/init.h> -#include <linux/compat.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> -#include <linux/export.h> #include <linux/mutex.h> #include <linux/cma.h> #include <linux/mm.h> +#include <asm/machine.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/vmcp.h> @@ -52,7 +51,7 @@ early_param("vmcp_cma", early_parse_vmcp_cma); void __init vmcp_cma_reserve(void) { - if (!MACHINE_IS_VM) + if (!machine_is_vm()) return; cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma); } @@ -204,10 +203,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int __user *argp; session = file->private_data; - if (is_compat_task()) - argp = compat_ptr(arg); - else - argp = (int __user *)arg; + argp = (int __user *)arg; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; switch (cmd) { @@ -241,8 +237,6 @@ static const struct file_operations vmcp_fops = { .read = vmcp_read, .write = vmcp_write, .unlocked_ioctl = 
vmcp_ioctl, - .compat_ioctl = vmcp_ioctl, - .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { @@ -255,7 +249,7 @@ static int __init vmcp_init(void) { int ret; - if (!MACHINE_IS_VM) + if (!machine_is_vm()) return 0; vmcp_debug = debug_register("vmcp", 1, 1, 240); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 6946ba9a9de2..383e7e2bd69f 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -11,8 +11,7 @@ * */ -#define KMSG_COMPONENT "vmlogrdr" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "vmlogrdr: " fmt #include <linux/module.h> #include <linux/init.h> @@ -23,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/uaccess.h> +#include <asm/machine.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/ebcdic.h> @@ -96,7 +96,6 @@ static const struct file_operations vmlogrdr_fops = { .open = vmlogrdr_open, .release = vmlogrdr_release, .read = vmlogrdr_read, - .llseek = no_llseek, }; @@ -124,7 +123,7 @@ static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); */ static struct vmlogrdr_priv_t sys_ser[] = { - { .system_service = "*LOGREC ", + { .system_service = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' }, .internal_name = "logrec", .recording_name = "EREP", .minor_num = 0, @@ -133,7 +132,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { .autorecording = 1, .autopurge = 1, }, - { .system_service = "*ACCOUNT", + { .system_service = { '*', 'A', 'C', 'C', 'O', 'U', 'N', 'T' }, .internal_name = "account", .recording_name = "ACCOUNT", .minor_num = 1, @@ -142,7 +141,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { .autorecording = 1, .autopurge = 1, }, - { .system_service = "*SYMPTOM", + { .system_service = { '*', 'S', 'Y', 'M', 'P', 'T', 'O', 'M' }, .internal_name = "symptom", .recording_name = "SYMPTOM", .minor_num = 2, @@ -255,7 +254,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, /* * The recording commands needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Purging has to be done as separate step, because recording * can't be switched on as long as records are on the queue. * Doing both at the same time doesn't work. @@ -357,7 +356,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) if (connect_rc) { pr_err("vmlogrdr: iucv connection to %s " "failed with rc %i \n", - logptr->system_service, connect_rc); + logptr->internal_name, connect_rc); goto out_path; } @@ -532,7 +531,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev, char *buf) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); - return sprintf(buf, "%u\n", priv->autopurge); + return sysfs_emit(buf, "%u\n", priv->autopurge); } @@ -557,7 +556,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, /* * The recording command needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Other guests will not recognize the command and we have to * issue the same command without the QID parameter. 
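The sprintf() to sysfs_emit() conversions in this file, and in the vmur, ccwgroup, chp, css, and cmf hunks below, all follow the same recipe: sysfs_emit() is sprintf() restricted to a sysfs buffer, capping output at PAGE_SIZE and warning when the target is not a page-aligned sysfs buffer. A minimal sketch of a converted show() callback:

```c
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_flag_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned int flag = 1;	/* stand-in for real driver state */

	return sysfs_emit(buf, "%u\n", flag);
}
static DEVICE_ATTR_RO(demo_flag);
```

For multi-item attributes, sysfs_emit_at(buf, ret, ...) appends at an offset, as the chpids_show() hunk further below demonstrates.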
*/ @@ -606,7 +605,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev, char *buf) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); - return sprintf(buf, "%u\n", priv->autorecording); + return sysfs_emit(buf, "%u\n", priv->autorecording); } @@ -679,7 +678,9 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = { NULL, }; -static struct class *vmlogrdr_class; +static const struct class vmlogrdr_class = { + .name = "vmlogrdr_class", +}; static struct device_driver vmlogrdr_driver = { .name = "vmlogrdr", .bus = &iucv_bus, @@ -699,12 +700,9 @@ static int vmlogrdr_register_driver(void) if (ret) goto out_iucv; - vmlogrdr_class = class_create("vmlogrdr"); - if (IS_ERR(vmlogrdr_class)) { - ret = PTR_ERR(vmlogrdr_class); - vmlogrdr_class = NULL; + ret = class_register(&vmlogrdr_class); + if (ret) goto out_driver; - } return 0; out_driver: @@ -718,8 +716,7 @@ out: static void vmlogrdr_unregister_driver(void) { - class_destroy(vmlogrdr_class); - vmlogrdr_class = NULL; + class_unregister(&vmlogrdr_class); driver_unregister(&vmlogrdr_driver); iucv_unregister(&vmlogrdr_iucv_handler, 1); } @@ -730,23 +727,9 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) struct device *dev; int ret; - dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (dev) { - dev_set_name(dev, "%s", priv->internal_name); - dev->bus = &iucv_bus; - dev->parent = iucv_root; - dev->driver = &vmlogrdr_driver; - dev->groups = vmlogrdr_attr_groups; - dev_set_drvdata(dev, priv); - /* - * The release function could be called after the - * module has been unloaded. It's _only_ task is to - * free the struct. Therefore, we specify kfree() - * directly here. (Probably a little bit obfuscating - * but legitime ...). - */ - dev->release = (void (*)(struct device *))kfree; - } else + dev = iucv_alloc_device(vmlogrdr_attr_groups, &vmlogrdr_driver, + priv, priv->internal_name); + if (!dev) return -ENOMEM; ret = device_register(dev); if (ret) { @@ -754,7 +737,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) return ret; } - priv->class_device = device_create(vmlogrdr_class, dev, + priv->class_device = device_create(&vmlogrdr_class, dev, MKDEV(vmlogrdr_major, priv->minor_num), priv, "%s", dev_name(dev)); @@ -771,7 +754,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) { - device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); + device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); if (priv->device != NULL) { device_unregister(priv->device); priv->device=NULL; @@ -826,7 +809,7 @@ static int __init vmlogrdr_init(void) int i; dev_t dev; - if (! 
MACHINE_IS_VM) { + if (!machine_is_vm()) { pr_err("not running under VM, driver not loaded.\n"); return -ENODEV; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 82efdd20ad01..e3e0e9f36527 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -9,8 +9,7 @@ * Frank Munzert <munzert@de.ibm.com> */ -#define KMSG_COMPONENT "vmur" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "vmur: " fmt #include <linux/cdev.h> #include <linux/slab.h> @@ -18,6 +17,7 @@ #include <linux/kobject.h> #include <linux/uaccess.h> +#include <asm/machine.h> #include <asm/cio.h> #include <asm/ccwdev.h> #include <asm/debug.h> @@ -48,7 +48,9 @@ MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); MODULE_LICENSE("GPL"); static dev_t ur_first_dev_maj_min; -static struct class *vmur_class; +static const struct class vmur_class = { + .name = "vmur", +}; static struct debug_info *vmur_dbf; /* We put the device's record length (for writes) in the driver_info field */ @@ -195,7 +197,7 @@ static void free_chan_prog(struct ccw1 *cpa) struct ccw1 *ptr = cpa; while (ptr->cda) { - kfree((void *)(addr_t) ptr->cda); + kfree(dma32_to_virt(ptr->cda)); ptr++; } kfree(cpa); @@ -237,7 +239,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count, free_chan_prog(cpa); return ERR_PTR(-ENOMEM); } - cpa[i].cda = (u32)(addr_t) kbuf; + cpa[i].cda = virt_to_dma32(kbuf); if (copy_from_user(kbuf, ubuf, reclen)) { free_chan_prog(cpa); return ERR_PTR(-EFAULT); @@ -343,7 +345,7 @@ static ssize_t ur_attr_reclen_show(struct device *dev, urd = urdev_get_from_cdev(to_ccwdev(dev)); if (!urd) return -ENODEV; - rc = sprintf(buf, "%zu\n", urd->reclen); + rc = sysfs_emit(buf, "%zu\n", urd->reclen); urdev_put(urd); return rc; } @@ -912,7 +914,7 @@ static int ur_set_online(struct ccw_device *cdev) goto fail_free_cdev; } - urd->device = device_create(vmur_class, &cdev->dev, + urd->device = device_create(&vmur_class, &cdev->dev, urd->char_device->dev, NULL, "%s", node_id); if (IS_ERR(urd->device)) { rc = PTR_ERR(urd->device); @@ -958,7 +960,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force) /* Work not run yet - need to release reference here */ urdev_put(urd); } - device_destroy(vmur_class, urd->char_device->dev); + device_destroy(&vmur_class, urd->char_device->dev); cdev_del(urd->char_device); urd->char_device = NULL; rc = 0; @@ -1007,7 +1009,7 @@ static int __init ur_init(void) int rc; dev_t dev; - if (!MACHINE_IS_VM) { + if (!machine_is_vm()) { pr_err("The %s cannot be loaded without z/VM\n", ur_banner); return -ENODEV; @@ -1022,11 +1024,9 @@ static int __init ur_init(void) debug_set_level(vmur_dbf, 6); - vmur_class = class_create("vmur"); - if (IS_ERR(vmur_class)) { - rc = PTR_ERR(vmur_class); + rc = class_register(&vmur_class); + if (rc) goto fail_free_dbf; - } rc = ccw_driver_register(&ur_driver); if (rc) @@ -1046,7 +1046,7 @@ static int __init ur_init(void) fail_unregister_driver: ccw_driver_unregister(&ur_driver); fail_class_destroy: - class_destroy(vmur_class); + class_unregister(&vmur_class); fail_free_dbf: debug_unregister(vmur_dbf); return rc; @@ -1056,7 +1056,7 @@ static void __exit ur_exit(void) { unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); ccw_driver_unregister(&ur_driver); - class_destroy(vmur_class); + class_unregister(&vmur_class); debug_unregister(vmur_dbf); pr_info("%s unloaded.\n", ur_banner); } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bc3be0330f1d..b26b5fca6ce8 100644 --- 
a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -9,8 +9,7 @@ * Author(s): Michael Holzheu */ -#define KMSG_COMPONENT "zdump" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zdump: " fmt #include <linux/init.h> #include <linux/slab.h> @@ -29,7 +28,6 @@ #include <asm/irqflags.h> #include <asm/checksum.h> #include <asm/os_info.h> -#include <asm/switch_to.h> #include <asm/maccess.h> #include "sclp.h" @@ -166,7 +164,6 @@ static const struct file_operations zcore_reipl_fops = { .write = zcore_reipl_write, .open = zcore_reipl_open, .release = zcore_reipl_release, - .llseek = no_llseek, }; static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, @@ -201,7 +198,6 @@ static const struct file_operations zcore_hsa_fops = { .write = zcore_hsa_write, .read = zcore_hsa_read, .open = nonseekable_open, - .llseek = no_llseek, }; static int __init check_sdias(void) diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 34967e67249e..f5c59abba221 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -9,6 +9,7 @@ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ +#include <linux/export.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel_stat.h> @@ -49,8 +50,6 @@ int register_adapter_interrupt(struct airq_struct *airq) return -ENOMEM; airq->flags |= AIRQ_PTR_ALLOCATED; } - if (!airq->lsi_mask) - airq->lsi_mask = 0xff; snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq); CIO_TRACE_EVENT(4, dbf_txt); isc_register(airq->isc); @@ -92,13 +91,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) struct airq_struct *airq; struct hlist_head *head; - set_cpu_flag(CIF_NOHZ_DELAY); tpi_info = &get_irq_regs()->tpi_info; trace_s390_cio_adapter_int(tpi_info); head = &airq_lists[tpi_info->isc]; rcu_read_lock(); hlist_for_each_entry_rcu(airq, head, list) - if ((*airq->lsi_ptr & airq->lsi_mask) != 0) + if (*airq->lsi_ptr != 0) airq->handler(airq, tpi_info); rcu_read_unlock(); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 93695d535380..738d5e2d5304 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -8,8 +8,7 @@ * Arnd Bergmann (arndb@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/init.h> #include <linux/vmalloc.h> diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index aa3292e57e38..185c99c5d4cc 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -7,6 +7,8 @@ * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ + +#include <linux/export.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> @@ -31,7 +33,7 @@ * to devices that use multiple subchannels. */ -static struct bus_type ccwgroup_bus_type; +static const struct bus_type ccwgroup_bus_type; static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { @@ -39,7 +41,7 @@ static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) char str[16]; for (i = 0; i < gdev->count; i++) { - sprintf(str, "cdev%d", i); + scnprintf(str, sizeof(str), "cdev%d", i); sysfs_remove_link(&gdev->dev.kobj, str); sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); } @@ -147,7 +149,7 @@ static ssize_t ccwgroup_online_show(struct device *dev, online = (gdev->state == CCWGROUP_ONLINE) ? 
1 : 0; - return scnprintf(buf, PAGE_SIZE, "%d\n", online); + return sysfs_emit(buf, "%d\n", online); } /* @@ -240,19 +242,19 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, "group_device"); if (rc) { - for (--i; i >= 0; i--) + while (i--) sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); return rc; } } for (i = 0; i < gdev->count; i++) { - sprintf(str, "cdev%d", i); + scnprintf(str, sizeof(str), "cdev%d", i); rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, str); if (rc) { - for (--i; i >= 0; i--) { - sprintf(str, "cdev%d", i); + while (i--) { + scnprintf(str, sizeof(str), "cdev%d", i); sysfs_remove_link(&gdev->dev.kobj, str); } for (i = 0; i < gdev->count; i++) @@ -465,7 +467,7 @@ static void ccwgroup_shutdown(struct device *dev) gdrv->shutdown(gdev); } -static struct bus_type ccwgroup_bus_type = { +static const struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", .dev_groups = ccwgroup_dev_groups, .remove = ccwgroup_remove, @@ -550,4 +552,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev) put_device(&gdev->dev); } EXPORT_SYMBOL(ccwgroup_remove_ccwdev); +MODULE_DESCRIPTION("ccwgroup bus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 73582a0a2622..763f477cc431 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -6,8 +6,7 @@ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/types.h> #include <linux/err.h> diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 5440f285f349..c10e2444507e 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -111,8 +111,9 @@ static int s390_vary_chpid(struct chp_id chpid, int on) char dbf_text[15]; int status; - sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, - chpid.id); + scnprintf(dbf_text, sizeof(dbf_text), + on ? 
"varyon%x.%02x" : "varyoff%x.%02x", + chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_text); status = chp_get_status(chpid); @@ -127,10 +128,9 @@ static int s390_vary_chpid(struct chp_id chpid, int on) /* * Channel measurement related functions */ -static ssize_t chp_measurement_chars_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) +static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct channel_path *chp; struct device *device; @@ -143,87 +143,92 @@ static ssize_t chp_measurement_chars_read(struct file *filp, return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars, sizeof(chp->cmg_chars)); } +static const BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars)); -static const struct bin_attribute chp_measurement_chars_attr = { - .attr = { - .name = "measurement_chars", - .mode = S_IRUSR, - }, - .size = sizeof(struct cmg_chars), - .read = chp_measurement_chars_read, -}; - -static void chp_measurement_copy_block(struct cmg_entry *buf, - struct channel_subsystem *css, - struct chp_id chpid) +static ssize_t measurement_chars_full_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { - void *area; - struct cmg_entry *entry, reference_buf; - int idx; + struct channel_path *chp = to_channelpath(kobj_to_dev(kobj)); - if (chpid.id < 128) { - area = css->cub_addr1; - idx = chpid.id; - } else { - area = css->cub_addr2; - idx = chpid.id - 128; - } - entry = area + (idx * sizeof(struct cmg_entry)); - do { - memcpy(buf, entry, sizeof(*entry)); - memcpy(&reference_buf, entry, sizeof(*entry)); - } while (reference_buf.values[0] != buf->values[0]); + return memory_read_from_buffer(buf, count, &off, &chp->cmcb, + sizeof(chp->cmcb)); } +static BIN_ATTR_ADMIN_RO(measurement_chars_full, sizeof(struct cmg_cmcb)); -static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) +static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count, + struct kobject *kobj, bool extended) { struct channel_path *chp; struct channel_subsystem *css; struct device *device; unsigned int size; + void *area, *entry; + int id, idx; device = kobj_to_dev(kobj); chp = to_channelpath(device); css = to_css(chp->dev.parent); + id = chp->chpid.id; - size = sizeof(struct cmg_entry); + if (extended) { + /* Check if extended measurement data is available. */ + if (!chp->extended) + return 0; + + size = sizeof(struct cmg_ext_entry); + area = css->ecub[id / CSS_ECUES_PER_PAGE]; + idx = id % CSS_ECUES_PER_PAGE; + } else { + size = sizeof(struct cmg_entry); + area = css->cub[id / CSS_CUES_PER_PAGE]; + idx = id % CSS_CUES_PER_PAGE; + } + entry = area + (idx * size); /* Only allow single reads. 
*/ if (off || count < size) return 0; - chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); - count = size; - return count; + + memcpy(buf, entry, size); + + return size; +} + +static ssize_t measurement_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + return chp_measurement_copy_block(buf, off, count, kobj, false); } +static const BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry)); -static const struct bin_attribute chp_measurement_attr = { - .attr = { - .name = "measurement", - .mode = S_IRUSR, - }, - .size = sizeof(struct cmg_entry), - .read = chp_measurement_read, +static ssize_t ext_measurement_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + return chp_measurement_copy_block(buf, off, count, kobj, true); +} +static const BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry)); + +static const struct bin_attribute *measurement_attrs[] = { + &bin_attr_measurement_chars, + &bin_attr_measurement_chars_full, + &bin_attr_measurement, + &bin_attr_ext_measurement, + NULL, }; +BIN_ATTRIBUTE_GROUPS(measurement); void chp_remove_cmg_attr(struct channel_path *chp) { - device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); - device_remove_bin_file(&chp->dev, &chp_measurement_attr); + device_remove_groups(&chp->dev, measurement_groups); } int chp_add_cmg_attr(struct channel_path *chp) { - int ret; - - ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); - if (ret) - return ret; - ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); - if (ret) - device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); - return ret; + return device_add_groups(&chp->dev, measurement_groups); } /* @@ -239,7 +244,7 @@ static ssize_t chp_status_show(struct device *dev, status = chp->state; mutex_unlock(&chp->lock); - return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n"); + return status ? 
sysfs_emit(buf, "online\n") : sysfs_emit(buf, "offline\n"); } static ssize_t chp_status_write(struct device *dev, @@ -320,7 +325,7 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, mutex_lock(&chp->lock); type = chp->desc.desc; mutex_unlock(&chp->lock); - return sprintf(buf, "%x\n", type); + return sysfs_emit(buf, "%x\n", type); } static DEVICE_ATTR(type, 0444, chp_type_show, NULL); @@ -333,8 +338,8 @@ static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, if (!chp) return 0; if (chp->cmg == -1) /* channel measurements not available */ - return sprintf(buf, "unknown\n"); - return sprintf(buf, "%x\n", chp->cmg); + return sysfs_emit(buf, "unknown\n"); + return sysfs_emit(buf, "%d\n", chp->cmg); } static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); @@ -347,8 +352,8 @@ static ssize_t chp_shared_show(struct device *dev, if (!chp) return 0; if (chp->shared == -1) /* channel measurements not available */ - return sprintf(buf, "unknown\n"); - return sprintf(buf, "%x\n", chp->shared); + return sysfs_emit(buf, "unknown\n"); + return sysfs_emit(buf, "%x\n", chp->shared); } static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); @@ -361,7 +366,7 @@ static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, mutex_lock(&chp->lock); if (chp->desc_fmt1.flags & 0x10) - rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + rc = sysfs_emit(buf, "%04x\n", chp->desc_fmt1.chid); else rc = 0; mutex_unlock(&chp->lock); @@ -378,7 +383,7 @@ static ssize_t chp_chid_external_show(struct device *dev, mutex_lock(&chp->lock); if (chp->desc_fmt1.flags & 0x10) - rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0); + rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0); else rc = 0; mutex_unlock(&chp->lock); @@ -394,15 +399,44 @@ static ssize_t chp_esc_show(struct device *dev, ssize_t rc; mutex_lock(&chp->lock); - rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc); + rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.esc); mutex_unlock(&chp->lock); return rc; } static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL); +static char apply_max_suffix(unsigned long *value, unsigned long base) +{ + static char suffixes[] = { 0, 'K', 'M', 'G', 'T' }; + int i; + + for (i = 0; i < ARRAY_SIZE(suffixes) - 1; i++) { + if (*value < base || *value % base != 0) + break; + *value /= base; + } + + return suffixes[i]; +} + +static ssize_t speed_bps_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + unsigned long speed = chp->speed; + char suffix; + + suffix = apply_max_suffix(&speed, 1000); + + return suffix ? 
sysfs_emit(buf, "%lu%c\n", speed, suffix) : + sysfs_emit(buf, "%lu\n", speed); +} + +static DEVICE_ATTR_RO(speed_bps); + static ssize_t util_string_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct channel_path *chp = to_channelpath(kobj_to_dev(kobj)); @@ -415,10 +449,10 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj, return rc; } -static BIN_ATTR_RO(util_string, - sizeof(((struct channel_path_desc_fmt3 *)0)->util_str)); +static const BIN_ATTR_RO(util_string, + sizeof(((struct channel_path_desc_fmt3 *)0)->util_str)); -static struct bin_attribute *chp_bin_attrs[] = { +static const struct bin_attribute *const chp_bin_attrs[] = { &bin_attr_util_string, NULL, }; @@ -432,9 +466,10 @@ static struct attribute *chp_attrs[] = { &dev_attr_chid.attr, &dev_attr_chid_external.attr, &dev_attr_esc.attr, + &dev_attr_speed_bps.attr, NULL, }; -static struct attribute_group chp_attr_group = { +static const struct attribute_group chp_attr_group = { .attrs = chp_attrs, .bin_attrs = chp_bin_attrs, }; @@ -661,7 +696,8 @@ static int info_update(void) if (time_after(jiffies, chp_info_expires)) { /* Data is too old, update. */ rc = sclp_chp_read_info(&chp_info); - chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + if (!rc) + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL; } mutex_unlock(&info_lock); diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 7ee9eba0abcb..391b52a7474c 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -51,7 +51,10 @@ struct channel_path { /* Channel-measurement related stuff: */ int cmg; int shared; + int extended; + unsigned long speed; struct cmg_chars cmg_chars; + struct cmg_cmcb cmcb; }; /* Return channel_path struct for given chpid. */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 0abd77f4b664..fbb58edd6274 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -8,9 +8,9 @@ * Arnd Bergmann (arndb@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> @@ -24,7 +24,6 @@ #include <asm/crw.h> #include <asm/isc.h> #include <asm/ebcdic.h> -#include <asm/ap.h> #include "css.h" #include "cio.h" @@ -40,6 +39,20 @@ static DEFINE_SPINLOCK(chsc_page_lock); #define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */ #define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */ +static BLOCKING_NOTIFIER_HEAD(chsc_notifiers); + +int chsc_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&chsc_notifiers, nb); +} +EXPORT_SYMBOL(chsc_notifier_register); + +int chsc_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&chsc_notifiers, nb); +} +EXPORT_SYMBOL(chsc_notifier_unregister); + /** * chsc_error_from_response() - convert a chsc response to an error * @response: chsc response code @@ -191,7 +204,7 @@ EXPORT_SYMBOL_GPL(chsc_ssqd); * Returns 0 on success. 
*/ int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, - u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc) + dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc) { memset(scssc, 0, sizeof(*scssc)); scssc->request.length = 0x0fe0; @@ -219,16 +232,16 @@ EXPORT_SYMBOL_GPL(chsc_sadc); static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) { - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); if (sch->driver && sch->driver->chp_event) if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) goto out_unreg; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; out_unreg: sch->lpm = 0; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); css_schedule_eval(sch->schid); return 0; } @@ -239,7 +252,7 @@ void chsc_chp_offline(struct chp_id chpid) struct chp_link link; char dbf_txt[15]; - sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); + scnprintf(dbf_txt, sizeof(dbf_txt), "chpr%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); if (chp_get_status(chpid) <= 0) @@ -258,10 +271,10 @@ void chsc_chp_offline(struct chp_id chpid) static int __s390_process_res_acc(struct subchannel *sch, void *data) { - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); if (sch->driver && sch->driver->chp_event) sch->driver->chp_event(sch, data, CHP_ONLINE); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; } @@ -270,11 +283,11 @@ static void s390_process_res_acc(struct chp_link *link) { char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, - link->chpid.id); + scnprintf(dbf_txt, sizeof(dbf_txt), "accpr%x.%02x", link->chpid.cssid, + link->chpid.id); CIO_TRACE_EVENT( 2, dbf_txt); if (link->fla != 0) { - sprintf(dbf_txt, "fla%x", link->fla); + scnprintf(dbf_txt, sizeof(dbf_txt), "fla%x", link->fla); CIO_TRACE_EVENT( 2, dbf_txt); } /* Wait until previous actions have settled. */ @@ -292,10 +305,10 @@ static void s390_process_res_acc(struct chp_link *link) static int process_fces_event(struct subchannel *sch, void *data) { - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); if (sch->driver && sch->driver->chp_event) sch->driver->chp_event(sch, data, CHP_FCES_EVENT); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; } @@ -363,7 +376,7 @@ struct lir { #define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */ #define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */ -/* Copy EBCIDC text, convert to ASCII and optionally add delimiter. */ +/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. 
*/ static char *store_ebcdic(char *dest, const char *src, unsigned long len, char delim) { @@ -393,8 +406,8 @@ static void format_node_data(char *params, char *id, struct node_descriptor *nd) memset(id, 0, NODEID_LEN); if (nd->validity != ND_VALIDITY_VALID) { - strncpy(params, "n/a", PARAMS_LEN - 1); - strncpy(id, "n/a", NODEID_LEN - 1); + strscpy(params, "n/a", PARAMS_LEN); + strscpy(id, "n/a", NODEID_LEN); return; } @@ -581,7 +594,8 @@ static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) if (sei_area->rs != 5) return; - ap_bus_cfg_chg(); + blocking_notifier_call_chain(&chsc_notifiers, + CHSC_NOTIFY_AP_CFG, NULL); } static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area) @@ -742,7 +756,7 @@ void chsc_chp_online(struct chp_id chpid) struct chp_link link; char dbf_txt[15]; - sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); + scnprintf(dbf_txt, sizeof(dbf_txt), "cadd%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); if (chp_get_status(chpid) != 0) { @@ -769,11 +783,11 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch, memset(&link, 0, sizeof(struct chp_link)); link.chpid = chpid; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); if (sch->driver && sch->driver->chp_event) sch->driver->chp_event(sch, &link, on ? CHP_VARY_ON : CHP_VARY_OFF); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); } static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) @@ -844,7 +858,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css) } return ret; cleanup: - for (--i; i >= 0; i--) { + while (i--) { if (!css->chps[i]) continue; chp_remove_cmg_attr(css->chps[i]); @@ -857,22 +871,22 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) struct { struct chsc_header request; u32 operation_code : 2; - u32 : 30; + u32 : 1; + u32 e : 1; + u32 : 28; u32 key : 4; u32 : 28; - u32 zeroes1; - u32 cub_addr1; - u32 zeroes2; - u32 cub_addr2; - u32 reserved[13]; + dma64_t cub[CSS_NUM_CUB_PAGES]; + dma64_t ecub[CSS_NUM_ECUB_PAGES]; + u32 reserved[5]; struct chsc_header response; u32 status : 8; u32 : 4; u32 fmt : 4; u32 : 16; - } *secm_area; + } __packed *secm_area; unsigned long flags; - int ret, ccode; + int ret, ccode, i; spin_lock_irqsave(&chsc_page_lock, flags); memset(chsc_page, 0, PAGE_SIZE); @@ -881,8 +895,12 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) secm_area->request.code = 0x0016; secm_area->key = PAGE_DEFAULT_KEY >> 4; - secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; - secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; + secm_area->e = 1; + + for (i = 0; i < CSS_NUM_CUB_PAGES; i++) + secm_area->cub[i] = (__force dma64_t)virt_to_dma32(css->cub[i]); + for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) + secm_area->ecub[i] = virt_to_dma64(css->ecub[i]); secm_area->operation_code = enable ? 
0 : 1; @@ -908,19 +926,47 @@ out: return ret; } +static int cub_alloc(struct channel_subsystem *css) +{ + int i; + + for (i = 0; i < CSS_NUM_CUB_PAGES; i++) { + css->cub[i] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!css->cub[i]) + return -ENOMEM; + } + for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) { + css->ecub[i] = (void *)get_zeroed_page(GFP_KERNEL); + if (!css->ecub[i]) + return -ENOMEM; + } + + return 0; +} + +static void cub_free(struct channel_subsystem *css) +{ + int i; + + for (i = 0; i < CSS_NUM_CUB_PAGES; i++) { + free_page((unsigned long)css->cub[i]); + css->cub[i] = NULL; + } + for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) { + free_page((unsigned long)css->ecub[i]); + css->ecub[i] = NULL; + } +} + int chsc_secm(struct channel_subsystem *css, int enable) { int ret; if (enable && !css->cm_enabled) { - css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!css->cub_addr1 || !css->cub_addr2) { - free_page((unsigned long)css->cub_addr1); - free_page((unsigned long)css->cub_addr2); - return -ENOMEM; - } + ret = cub_alloc(css); + if (ret) + goto out; } ret = __chsc_do_secm(css, enable); if (!ret) { @@ -934,10 +980,11 @@ chsc_secm(struct channel_subsystem *css, int enable) } else chsc_remove_cmg_attr(css); } - if (!css->cm_enabled) { - free_page((unsigned long)css->cub_addr1); - free_page((unsigned long)css->cub_addr2); - } + +out: + if (!css->cm_enabled) + cub_free(css); + return ret; } @@ -1019,6 +1066,18 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, } } +static unsigned long scmc_get_speed(u32 s, u32 p) +{ + unsigned long speed = s; + + if (!p) + p = 8; + while (p--) + speed *= 10; + + return speed; +} + int chsc_get_channel_measurement_chars(struct channel_path *chp) { unsigned long flags; @@ -1033,20 +1092,13 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) u32 zeroes1; struct chsc_header response; u32 zeroes2; - u32 not_valid : 1; - u32 shared : 1; - u32 : 22; - u32 chpid : 8; - u32 cmcv : 5; - u32 : 11; - u32 cmgq : 8; - u32 cmg : 8; - u32 zeroes3; - u32 data[NR_MEASUREMENT_CHARS]; + struct cmg_cmcb cmcb; } *scmc_area; chp->shared = -1; chp->cmg = -1; + chp->extended = 0; + chp->speed = 0; if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) return -EINVAL; @@ -1071,17 +1123,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) scmc_area->response.code); goto out; } - if (scmc_area->not_valid) + chp->cmcb = scmc_area->cmcb; + if (scmc_area->cmcb.not_valid) goto out; - chp->cmg = scmc_area->cmg; - chp->shared = scmc_area->shared; - if (chp->cmg != 2 && chp->cmg != 3) { - /* No cmg-dependent data. 
*/ - goto out; - } - chsc_initialize_cmg_chars(chp, scmc_area->cmcv, - (struct cmg_chars *) &scmc_area->data); + chp->cmg = scmc_area->cmcb.cmg; + chp->shared = scmc_area->cmcb.shared; + chp->extended = scmc_area->cmcb.extended; + chp->speed = scmc_get_speed(scmc_area->cmcb.cmgs, scmc_area->cmcb.cmgp); + chsc_initialize_cmg_chars(chp, scmc_area->cmcb.cmcv, + (struct cmg_chars *)&scmc_area->cmcb.data); out: spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; @@ -1091,8 +1142,8 @@ int __init chsc_init(void) { int ret; - sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sei_page = (void *)get_zeroed_page(GFP_KERNEL); + chsc_page = (void *)get_zeroed_page(GFP_KERNEL); if (!sei_page || !chsc_page) { ret = -ENOMEM; goto out_err; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index d1caacb08e67..6fe983ebf4b3 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -17,11 +17,32 @@ struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; }; +struct cmg_cmcb { + u32 not_valid : 1; + u32 shared : 1; + u32 extended : 1; + u32 : 21; + u32 chpid : 8; + u32 cmcv : 5; + u32 : 7; + u32 cmgp : 4; + u32 cmgq : 8; + u32 cmg : 8; + u32 : 16; + u32 cmgs : 16; + u32 data[NR_MEASUREMENT_CHARS]; +}; + #define NR_MEASUREMENT_ENTRIES 8 struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; }; +#define NR_EXT_MEASUREMENT_ENTRIES 16 +struct cmg_ext_entry { + u32 values[NR_EXT_MEASUREMENT_ENTRIES]; +}; + struct channel_path_desc_fmt1 { u8 flags; u8 lsn; @@ -91,8 +112,8 @@ struct chsc_scssc_area { u16:16; u32:32; u32:32; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; + dma64_t summary_indicator_addr; + dma64_t subchannel_indicator_addr; u32 ks:4; u32 kc:4; u32:21; @@ -164,7 +185,7 @@ void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, - u64 summary_indicator_addr, u64 subchannel_indicator_addr, + dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc); int chsc_sgib(u32 origin); int chsc_error_from_response(int response); diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 180ab899289c..ce992b2107cb 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -9,7 +9,6 @@ */ #include <linux/slab.h> -#include <linux/compat.h> #include <linux/device.h> #include <linux/io.h> #include <linux/module.h> @@ -211,10 +210,10 @@ static int chsc_async(struct chsc_async_area *chsc_area, chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; while ((sch = chsc_get_next_subchannel(sch))) { - spin_lock(sch->lock); + spin_lock(&sch->lock); private = dev_get_drvdata(&sch->dev); if (private->request) { - spin_unlock(sch->lock); + spin_unlock(&sch->lock); ret = -EBUSY; continue; } @@ -239,7 +238,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, default: ret = -ENODEV; } - spin_unlock(sch->lock); + spin_unlock(&sch->lock); CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", sch->schid.ssid, sch->schid.sch_no, cc); if (ret == -EINPROGRESS) @@ -293,7 +292,7 @@ static int chsc_ioctl_start(void __user *user_area) if (!css_general_characteristics.dynio) /* It makes no sense to try. 
*/ return -EOPNOTSUPP; - chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!chsc_area) return -ENOMEM; request = kzalloc(sizeof(*request), GFP_KERNEL); @@ -341,7 +340,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area) ret = -ENOMEM; goto out_unlock; } - on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!on_close_chsc_area) { ret = -ENOMEM; goto out_free_request; @@ -393,7 +392,7 @@ static int chsc_ioctl_start_sync(void __user *user_area) struct chsc_sync_area *chsc_area; int ret, ccode; - chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!chsc_area) return -ENOMEM; if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { @@ -439,7 +438,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scpcd_area; - scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scpcd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scpcd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); @@ -501,7 +500,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scucd_area; - scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scucd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scucd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); @@ -564,7 +563,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sscud_area; - sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sscud_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sscud_area) return -ENOMEM; cud = kzalloc(sizeof(*cud), GFP_KERNEL); @@ -626,7 +625,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sci_area; - sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sci_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sci_area) return -ENOMEM; ci = kzalloc(sizeof(*ci), GFP_KERNEL); @@ -697,7 +696,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl) u32 res; } __attribute__ ((packed)) *cssids_parm; - sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sccl_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sccl_area) return -ENOMEM; ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); @@ -757,7 +756,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd) int ret; chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); - scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scpd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scpd_area || !chpd) { ret = -ENOMEM; goto out_free; @@ -797,7 +796,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal) u8 data[PAGE_SIZE - 36]; } __attribute__ ((packed)) *sdcal_area; - sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sdcal_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sdcal_area) return -ENOMEM; dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); @@ -845,10 +844,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd, void __user *argp; CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); - if (is_compat_task()) - argp = compat_ptr(arg); - else - argp = (void __user *)arg; + argp = (void __user *)arg; switch (cmd) { case CHSC_START: return chsc_ioctl_start(argp); @@ -923,8 +919,6 @@ static const struct file_operations chsc_fops = { .open = chsc_open, .release = chsc_release, .unlocked_ioctl = 
chsc_ioctl, - .compat_ioctl = chsc_ioctl, - .llseek = no_llseek, }; static struct miscdevice chsc_misc_device = { diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 6127add746d1..70dc8cc76594 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -9,9 +9,9 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt +#include <linux/export.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/init.h> @@ -112,7 +112,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) if (cio_update_schib(sch)) return -ENODEV; - sprintf(dbf_text, "no%s", dev_name(&sch->dev)); + scnprintf(dbf_text, sizeof(dbf_text), "no%s", dev_name(&sch->dev)); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -148,7 +148,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ orb->cmd.i2k = 0; orb->cmd.key = key >> 4; /* issue "Start Subchannel" */ - orb->cmd.cpa = (u32)virt_to_phys(cpa); + orb->cmd.cpa = virt_to_dma32(cpa); ccode = ssch(sch->schid, orb); /* process condition code */ @@ -459,10 +459,14 @@ int cio_update_schib(struct subchannel *sch) { struct schib schib; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch(sch->schid, &schib)) return -ENODEV; memcpy(&sch->schib, &schib, sizeof(schib)); + + if (!css_sch_is_valid(&schib)) + return -EACCES; + return 0; } EXPORT_SYMBOL_GPL(cio_update_schib); @@ -535,7 +539,6 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) struct subchannel *sch; struct irb *irb; - set_cpu_flag(CIF_NOHZ_DELAY); tpi_info = &get_irq_regs()->tpi_info; trace_s390_cio_interrupt(tpi_info); irb = this_cpu_ptr(&cio_irb); @@ -546,7 +549,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) return IRQ_HANDLED; } sch = phys_to_virt(tpi_info->intparm); - spin_lock(sch->lock); + spin_lock(&sch->lock); /* Store interrupt response block to lowcore. */ if (tsch(tpi_info->schid, irb) == 0) { /* Keep subchannel information word up to date. */ @@ -558,7 +561,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) inc_irq_stat(IRQIO_CIO); } else inc_irq_stat(IRQIO_CIO); - spin_unlock(sch->lock); + spin_unlock(&sch->lock); return IRQ_HANDLED; } @@ -663,7 +666,7 @@ struct subchannel *cio_probe_console(void) if (IS_ERR(sch)) return sch; - lockdep_set_class(sch->lock, &console_sch_key); + lockdep_set_class(&sch->lock, &console_sch_key); isc_register(CONSOLE_ISC); sch->config.isc = CONSOLE_ISC; sch->config.intparm = (u32)virt_to_phys(sch); @@ -717,7 +720,7 @@ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) orb->tm.key = key >> 4; orb->tm.b = 1; orb->tm.lpm = lpm ? 
lpm : sch->lpm; - orb->tm.tcw = (u32)virt_to_phys(tcw); + orb->tm.tcw = virt_to_dma32(tcw); cc = ssch(sch->schid, orb); switch (cc) { case 0: diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index fa8df50bb49e..08a5e9380e75 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -19,7 +19,7 @@ struct pmcw { u32 intparm; /* interruption parameter */ u32 qf : 1; /* qdio facility */ u32 w : 1; - u32 isc : 3; /* interruption sublass */ + u32 isc : 3; /* interruption subclass */ u32 res5 : 3; /* reserved zeros */ u32 ena : 1; /* enabled */ u32 lm : 2; /* limit mode */ @@ -83,7 +83,7 @@ enum sch_todo { /* subchannel data structure used by I/O subroutines */ struct subchannel { struct subchannel_id schid; - spinlock_t *lock; /* subchannel lock */ + spinlock_t lock; /* subchannel lock */ struct mutex reg_mutex; enum { SUBCHANNEL_TYPE_IO = 0, diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c index 8613fa937237..0e18cb921ef6 100644 --- a/drivers/s390/cio/cio_inject.c +++ b/drivers/s390/cio/cio_inject.c @@ -6,8 +6,7 @@ * Author(s): Vineeth Vijayan <vneethv@linux.ibm.com> */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/slab.h> #include <linux/spinlock.h> @@ -95,7 +94,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf, return -EINVAL; } - buffer = vmemdup_user(buf, lbuf); + buffer = memdup_user_nul(buf, lbuf); if (IS_ERR(buffer)) return -ENOMEM; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 5584aa46c94e..7d035e4937ce 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -10,8 +10,7 @@ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/memblock.h> #include <linux/device.h> @@ -46,7 +45,7 @@ /* indices for READCMB */ enum cmb_index { avg_utilization = -1, - /* basic and exended format: */ + /* basic and extended format: */ cmb_ssch_rsch_count = 0, cmb_sample_count, cmb_device_connect_time, @@ -135,7 +134,7 @@ static inline u64 time_to_nsec(u32 value) * Users are usually interested in average times, * not accumulated time. * This also helps us with atomicity problems - * when reading sinlge values. + * when reading single values. 
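The cio_inject hunk above swaps vmemdup_user() for memdup_user_nul() because the copied buffer is parsed as text: memdup_user_nul() allocates count + 1 bytes and appends a terminating NUL, so string parsers cannot run past the end of the copy. A minimal sketch of a write handler using it; the parsing step is illustrative:

```c
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	unsigned long val;
	char *kbuf;
	int rc;

	/* Copies count bytes and appends '\0', unlike vmemdup_user(). */
	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	rc = kstrtoul(strim(kbuf), 0, &val);
	kfree(kbuf);
	if (rc)
		return rc;

	/* ... act on val ... */
	return count;
}
```

Note the buffer from memdup_user_nul() is kmalloc-backed and freed with kfree(), whereas vmemdup_user() may return vmalloc memory that needs kvfree().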
*/ static inline u64 time_to_avg_nsec(u32 value, u32 count) { @@ -167,9 +166,10 @@ static inline void cmf_activate(void *area, unsigned int onoff) asm volatile( " lgr 1,%[r1]\n" " lgr 2,%[mbo]\n" - " schm\n" + " schm" : - : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area) + : [r1] "d" ((unsigned long)onoff), + [mbo] "d" (virt_to_phys(area)) : "1", "2"); } @@ -501,8 +501,7 @@ static int alloc_cmb(struct ccw_device *cdev) WARN_ON(!list_empty(&cmb_area.list)); spin_unlock(&cmb_area.lock); - mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA, - get_order(size)); + mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size)); spin_lock(&cmb_area.lock); if (cmb_area.mem) { @@ -977,8 +976,7 @@ static struct cmb_operations cmbops_extended = { static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx) { - return sprintf(buf, "%lld\n", - (unsigned long long) cmf_read(to_ccwdev(dev), idx)); + return sysfs_emit(buf, "%lld\n", cmf_read(to_ccwdev(dev), idx)); } static ssize_t cmb_show_avg_sample_interval(struct device *dev, @@ -998,7 +996,7 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev, } else interval = -1; spin_unlock_irq(cdev->ccwlock); - return sprintf(buf, "%ld\n", interval); + return sysfs_emit(buf, "%ld\n", interval); } static ssize_t cmb_show_avg_utilization(struct device *dev, @@ -1007,7 +1005,7 @@ static ssize_t cmb_show_avg_utilization(struct device *dev, { unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization); - return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10); + return sysfs_emit(buf, "%02lu.%01lu%%\n", u / 10, u % 10); } #define cmf_attr(name) \ @@ -1080,7 +1078,7 @@ static ssize_t cmb_enable_show(struct device *dev, { struct ccw_device *cdev = to_ccwdev(dev); - return sprintf(buf, "%d\n", cmf_enabled(cdev)); + return sysfs_emit(buf, "%d\n", cmf_enabled(cdev)); } static ssize_t cmb_enable_store(struct device *dev, @@ -1227,7 +1225,7 @@ int cmf_readall(struct ccw_device *cdev, struct cmbdata *data) return cmbops->readall(cdev, data); } -/* Reenable cmf when a disconnected device becomes available again. */ +/* Re-enable cmf when a disconnected device becomes available again. 
*/ int cmf_reenable(struct ccw_device *cdev) { cmbops->reset(cdev); diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c index 7b02a6349c4d..3285ce636c5c 100644 --- a/drivers/s390/cio/crw.c +++ b/drivers/s390/cio/crw.c @@ -12,8 +12,8 @@ #include <linux/kthread.h> #include <linux/init.h> #include <linux/wait.h> +#include <asm/ctlreg.h> #include <asm/crw.h> -#include <asm/ctl_reg.h> #include "ioasm.h" static DEFINE_MUTEX(crw_handler_mutex); @@ -77,9 +77,8 @@ repeat: if (unlikely(chain > 1)) { struct crw tmp_crw; - printk(KERN_WARNING"%s: Code does not support more " - "than two chained crws; please report to " - "linux390@de.ibm.com!\n", __func__); + printk(KERN_WARNING "%s: Code does not support more than two chained crws\n", + __func__); ccode = stcrw(&tmp_crw); printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", @@ -156,7 +155,7 @@ static int __init crw_machine_check_init(void) task = kthread_run(crw_collect_info, NULL, "kmcheck"); if (IS_ERR(task)) return PTR_ERR(task); - ctl_set_bit(14, 28); /* enable channel report MCH */ + system_ctl_set_bit(14, CR14_CHANNEL_REPORT_SUBMASK_BIT); return 0; } device_initcall(crw_machine_check_init); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 3ef636935a54..4c85df7a548e 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -8,8 +8,7 @@ * Cornelia Huck (cornelia.huck@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/export.h> #include <linux/init.h> @@ -39,7 +38,7 @@ int max_ssid; #define MAX_CSS_IDX 0 struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1]; -static struct bus_type css_bus_type; +static const struct bus_type css_bus_type; int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) @@ -148,16 +147,10 @@ out: static void css_sch_todo(struct work_struct *work); -static int css_sch_create_locks(struct subchannel *sch) +static void css_sch_create_locks(struct subchannel *sch) { - sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); - if (!sch->lock) - return -ENOMEM; - - spin_lock_init(sch->lock); + spin_lock_init(&sch->lock); mutex_init(&sch->reg_mutex); - - return 0; } static void css_subchannel_release(struct device *dev) @@ -167,7 +160,6 @@ static void css_subchannel_release(struct device *dev) sch->config.intparm = 0; cio_commit_config(sch); kfree(sch->driver_override); - kfree(sch->lock); kfree(sch); } @@ -219,9 +211,7 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, sch->schib = *schib; sch->st = schib->pmcw.st; - ret = css_sch_create_locks(sch); - if (ret) - goto err; + css_sch_create_locks(sch); INIT_WORK(&sch->todo_work, css_sch_todo); sch->dev.release = &css_subchannel_release; @@ -318,7 +308,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr, { struct subchannel *sch = to_subchannel(dev); - return sprintf(buf, "%01x\n", sch->st); + return sysfs_emit(buf, "%01x\n", sch->st); } static DEVICE_ATTR_RO(type); @@ -328,7 +318,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, { struct subchannel *sch = to_subchannel(dev); - return sprintf(buf, "css:t%01X\n", sch->st); + return sysfs_emit(buf, "css:t%01X\n", sch->st); } static DEVICE_ATTR_RO(modalias); @@ -354,7 +344,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override); + len = sysfs_emit(buf, "%s\n", 
sch->driver_override); device_unlock(dev); return len; } @@ -389,11 +379,11 @@ static ssize_t chpids_show(struct device *dev, for (chp = 0; chp < 8; chp++) { mask = 0x80 >> chp; if (ssd->path_mask & mask) - ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); + ret += sysfs_emit_at(buf, ret, "%02x ", ssd->chpid[chp].id); else - ret += sprintf(buf + ret, "00 "); + ret += sysfs_emit_at(buf, ret, "00 "); } - ret += sprintf(buf + ret, "\n"); + ret += sysfs_emit_at(buf, ret, "\n"); return ret; } static DEVICE_ATTR_RO(chpids); @@ -405,8 +395,8 @@ static ssize_t pimpampom_show(struct device *dev, struct subchannel *sch = to_subchannel(dev); struct pmcw *pmcw = &sch->schib.pmcw; - return sprintf(buf, "%02x %02x %02x\n", - pmcw->pim, pmcw->pam, pmcw->pom); + return sysfs_emit(buf, "%02x %02x %02x\n", + pmcw->pim, pmcw->pam, pmcw->pom); } static DEVICE_ATTR_RO(pimpampom); @@ -602,12 +592,12 @@ static void css_sch_todo(struct work_struct *work) sch = container_of(work, struct subchannel, todo_work); /* Find out todo. */ - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); todo = sch->todo; CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, sch->schid.sch_no, todo); sch->todo = SCH_TODO_NOTHING; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); /* Perform todo. */ switch (todo) { case SCH_TODO_NOTHING: @@ -615,9 +605,9 @@ static void css_sch_todo(struct work_struct *work) case SCH_TODO_EVAL: ret = css_evaluate_known_subchannel(sch, 1); if (ret == -EAGAIN) { - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); css_sched_sch_todo(sch, todo); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } break; case SCH_TODO_UNREG: @@ -890,7 +880,7 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a, if (!css->id_valid) return -EINVAL; - return sprintf(buf, "%x\n", css->cssid); + return sysfs_emit(buf, "%x\n", css->cssid); } static DEVICE_ATTR_RO(real_cssid); @@ -913,7 +903,7 @@ static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a, int ret; mutex_lock(&css->mutex); - ret = sprintf(buf, "%x\n", css->cm_enabled); + ret = sysfs_emit(buf, "%x\n", css->cm_enabled); mutex_unlock(&css->mutex); return ret; } @@ -1026,12 +1016,7 @@ static int __init setup_css(int nr) css->pseudo_subchannel->dev.parent = &css->device; css->pseudo_subchannel->dev.release = css_subchannel_release; mutex_init(&css->pseudo_subchannel->reg_mutex); - ret = css_sch_create_locks(css->pseudo_subchannel); - if (ret) { - kfree(css->pseudo_subchannel); - device_unregister(&css->device); - goto out_err; - } + css_sch_create_locks(css->pseudo_subchannel); dev_set_name(&css->pseudo_subchannel->dev, "defunct"); ret = device_register(&css->pseudo_subchannel->dev); @@ -1128,26 +1113,33 @@ static int cio_dma_pool_init(void) return 0; } -void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, - size_t size) +void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size, dma32_t *dma_handle) { dma_addr_t dma_addr; - unsigned long addr; size_t chunk_size; + void *addr; if (!gp_dma) return NULL; - addr = gen_pool_alloc(gp_dma, size); + addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr); while (!addr) { chunk_size = round_up(size, PAGE_SIZE); - addr = (unsigned long) dma_alloc_coherent(dma_dev, - chunk_size, &dma_addr, CIO_DMA_GFP); + addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP); if (!addr) return NULL; - gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1); - addr = 
gen_pool_alloc(gp_dma, size); + gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1); + addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL); } - return (void *) addr; + if (dma_handle) + *dma_handle = (__force dma32_t)dma_addr; + return addr; +} + +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size) +{ + return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL); } void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size) @@ -1339,7 +1331,6 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, static const struct proc_ops cio_settle_proc_ops = { .proc_open = nonseekable_open, .proc_write = cio_settle_write, - .proc_lseek = no_llseek, }; static int __init cio_settle_init(void) @@ -1361,10 +1352,10 @@ int sch_is_pseudo_sch(struct subchannel *sch) return sch == to_css(sch->dev.parent)->pseudo_subchannel; } -static int css_bus_match(struct device *dev, struct device_driver *drv) +static int css_bus_match(struct device *dev, const struct device_driver *drv) { struct subchannel *sch = to_subchannel(dev); - struct css_driver *driver = to_cssdriver(drv); + const struct css_driver *driver = to_cssdriver(drv); struct css_device_id *id; /* When driver_override is set, only bind to the matching driver */ @@ -1423,7 +1414,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env) return ret; } -static struct bus_type css_bus_type = { +static const struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index ea5550554297..a65a27dc520c 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -35,6 +35,15 @@ #define SNID_STATE3_SINGLE_PATH 0 /* + * Miscellaneous constants + */ + +#define CSS_NUM_CUB_PAGES 2 +#define CSS_CUES_PER_PAGE 128 +#define CSS_NUM_ECUB_PAGES 4 +#define CSS_ECUES_PER_PAGE 64 + +/* * Conditions used to specify which subchannels need evaluation */ enum css_eval_cond { @@ -94,7 +103,7 @@ struct css_driver { int (*settle)(void); }; -#define to_cssdriver(n) container_of(n, struct css_driver, drv) +#define to_cssdriver(n) container_of_const(n, struct css_driver, drv) extern int css_driver_register(struct css_driver *); extern void css_driver_unregister(struct css_driver *); @@ -122,8 +131,8 @@ struct channel_subsystem { struct mutex mutex; /* channel measurement related */ int cm_enabled; - void *cub_addr1; - void *cub_addr2; + void *cub[CSS_NUM_CUB_PAGES]; + void *ecub[CSS_NUM_ECUB_PAGES]; /* for orphaned ccw devices */ struct subchannel *pseudo_subchannel; }; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4ca5adce9107..602f36102c7c 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -8,8 +8,7 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/export.h> #include <linux/init.h> @@ -49,7 +48,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 }; static atomic_t ccw_device_init_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); -static struct bus_type ccw_bus_type; +static const struct bus_type ccw_bus_type; /******************* bus type handling ***********************/ @@ -58,10 +57,10 @@ static struct bus_type ccw_bus_type; * subsystem driver and one channel system per machine, but * we still use the abstraction. T.R. says it's a good idea. 
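Back to the __cio_gp_dma_zalloc() rework a few hunks up: gen_pool_dma_alloc() returns the kernel virtual address and fills in the bus address in one step, so the helper no longer juggles unsigned longs, and the pool still grows on demand from dma_alloc_coherent(). The loop, condensed (GFP flags and error handling simplified; the real code uses CIO_DMA_GFP):

	void *addr;
	dma_addr_t dma_addr;
	size_t chunk_size;

	addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
	while (!addr) {
		/* pool exhausted: back it with a fresh coherent chunk, then retry */
		chunk_size = round_up(size, PAGE_SIZE);
		addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, GFP_KERNEL);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
		addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
	}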
*/ static int -ccw_bus_match (struct device * dev, struct device_driver * drv) +ccw_bus_match (struct device * dev, const struct device_driver * drv) { struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_driver *cdrv = to_ccwdrv(drv); + const struct ccw_driver *cdrv = to_ccwdrv(drv); const struct ccw_device_id *ids = cdrv->ids, *found; if (!ids) @@ -201,10 +200,9 @@ devtype_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device_id *id = &(cdev->id); if (id->dev_type != 0) - return sprintf(buf, "%04x/%02x\n", - id->dev_type, id->dev_model); + return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model); else - return sprintf(buf, "n/a\n"); + return sysfs_emit(buf, "n/a\n"); } static ssize_t @@ -213,8 +211,7 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); - return sprintf(buf, "%04x/%02x\n", - id->cu_type, id->cu_model); + return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model); } static ssize_t @@ -234,7 +231,7 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); - return sprintf(buf, cdev->online ? "1\n" : "0\n"); + return sysfs_emit(buf, cdev->online ? "1\n" : "0\n"); } int ccw_device_is_orphan(struct ccw_device *cdev) @@ -363,10 +360,8 @@ int ccw_device_set_online(struct ccw_device *cdev) spin_lock_irq(cdev->ccwlock); ret = ccw_device_online(cdev); - spin_unlock_irq(cdev->ccwlock); - if (ret == 0) - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - else { + if (ret) { + spin_unlock_irq(cdev->ccwlock); CIO_MSG_EVENT(0, "ccw_device_online returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, @@ -375,7 +370,12 @@ int ccw_device_set_online(struct ccw_device *cdev) put_device(&cdev->dev); return ret; } - spin_lock_irq(cdev->ccwlock); + /* Wait until a final state is reached */ + while (!dev_fsm_final_state(cdev)) { + spin_unlock_irq(cdev->ccwlock); + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); + spin_lock_irq(cdev->ccwlock); + } /* Check if online processing was successful */ if ((cdev->private->state != DEV_STATE_ONLINE) && (cdev->private->state != DEV_STATE_W4SENSE)) { @@ -543,21 +543,21 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) struct subchannel *sch; if (ccw_device_is_orphan(cdev)) - return sprintf(buf, "no device\n"); + return sysfs_emit(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: - return sprintf(buf, "boxed\n"); + return sysfs_emit(buf, "boxed\n"); case DEV_STATE_DISCONNECTED: case DEV_STATE_DISCONNECTED_SENSE_ID: case DEV_STATE_NOT_OPER: sch = to_subchannel(dev->parent); if (!sch->lpm) - return sprintf(buf, "no path\n"); + return sysfs_emit(buf, "no path\n"); else - return sprintf(buf, "no device\n"); + return sysfs_emit(buf, "no device\n"); default: /* All other states considered fine. 
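The reworked wait in ccw_device_set_online() above is the standard drop-lock-and-sleep idiom: wait_event() may sleep, so the spinlock must be released around it, and the condition is rechecked with the lock held because the state can change between wakeup and re-acquisition. In generic form (structure and helper names hypothetical):

	spin_lock_irq(&obj->lock);
	while (!state_is_final(obj)) {
		spin_unlock_irq(&obj->lock);
		wait_event(obj->wait_q, state_is_final(obj));
		spin_lock_irq(&obj->lock);
	}
	/* state_is_final(obj) now holds, and obj->lock is held */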
*/ - return sprintf(buf, "good\n"); + return sysfs_emit(buf, "good\n"); } } @@ -584,7 +584,7 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, { struct subchannel *sch = to_subchannel(dev); - return sprintf(buf, "%02x\n", sch->vpm); + return sysfs_emit(buf, "%02x\n", sch->vpm); } static DEVICE_ATTR_RO(devtype); @@ -748,7 +748,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, mutex_init(&cdev->reg_mutex); atomic_set(&priv->onoff, 0); - cdev->ccwlock = sch->lock; + cdev->ccwlock = &sch->lock; cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; cdev->dev.bus = &ccw_bus_type; @@ -764,9 +764,9 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, goto out_put; } priv->flags.initialized = 1; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch_set_cdev(sch, cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; out_put: @@ -851,9 +851,9 @@ static void io_subchannel_register(struct ccw_device *cdev) CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); sch_set_cdev(sch, NULL); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); mutex_unlock(&cdev->reg_mutex); /* Release initial device reference. */ put_device(&cdev->dev); @@ -904,9 +904,9 @@ static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) atomic_inc(&ccw_device_init_count); /* Start async. device sensing. */ - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); ccw_device_recognition(cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static int ccw_device_move_to_sch(struct ccw_device *cdev, @@ -921,12 +921,12 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, return -ENODEV; if (!sch_is_pseudo_sch(old_sch)) { - spin_lock_irq(old_sch->lock); + spin_lock_irq(&old_sch->lock); old_enabled = old_sch->schib.pmcw.ena; rc = 0; if (old_enabled) rc = cio_disable_subchannel(old_sch); - spin_unlock_irq(old_sch->lock); + spin_unlock_irq(&old_sch->lock); if (rc == -EBUSY) { /* Release child reference for new parent. */ put_device(&sch->dev); @@ -944,9 +944,9 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, sch->schib.pmcw.dev, rc); if (old_enabled) { /* Try to re-enable the old subchannel. */ - spin_lock_irq(old_sch->lock); + spin_lock_irq(&old_sch->lock); cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch)); - spin_unlock_irq(old_sch->lock); + spin_unlock_irq(&old_sch->lock); } /* Release child reference for new parent. */ put_device(&sch->dev); @@ -954,19 +954,19 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, } /* Clean up old subchannel. */ if (!sch_is_pseudo_sch(old_sch)) { - spin_lock_irq(old_sch->lock); + spin_lock_irq(&old_sch->lock); sch_set_cdev(old_sch, NULL); - spin_unlock_irq(old_sch->lock); + spin_unlock_irq(&old_sch->lock); css_schedule_eval(old_sch->schid); } /* Release child reference for old parent. */ put_device(&old_sch->dev); /* Initialize new subchannel. 
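Most of the churn in device.c follows from one structural change made in the cio.h and css.c hunks earlier: struct subchannel embeds its spinlock_t instead of pointing at a separately allocated lock, so every sch->lock becomes &sch->lock and the kmalloc()/kfree() error paths vanish. Reduced to a sketch:

	struct subchannel {
		/* ... */
		spinlock_t lock;	/* embedded; was: spinlock_t *lock */
	};

	static void css_sch_create_locks(struct subchannel *sch)
	{
		spin_lock_init(&sch->lock);	/* can no longer fail */
		mutex_init(&sch->reg_mutex);
	}

	/* holders of a cached pointer keep working unchanged: */
	cdev->ccwlock = &sch->lock;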
*/ - spin_lock_irq(sch->lock); - cdev->ccwlock = sch->lock; + spin_lock_irq(&sch->lock); + cdev->ccwlock = &sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); if (!sch_is_pseudo_sch(sch)) css_update_ssd_info(sch); return 0; @@ -1077,9 +1077,9 @@ static int io_subchannel_probe(struct subchannel *sch) return 0; out_schedule: - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); css_sched_sch_todo(sch, SCH_TODO_UNREG); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; } @@ -1093,10 +1093,10 @@ static void io_subchannel_remove(struct subchannel *sch) goto out_free; ccw_device_unregister(cdev); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch_set_cdev(sch, NULL); set_io_private(sch, NULL); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); out_free: dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), io_priv->dma_area, io_priv->dma_area_dma); @@ -1203,7 +1203,7 @@ static void io_subchannel_quiesce(struct subchannel *sch) struct ccw_device *cdev; int ret; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); cdev = sch_get_cdev(sch); if (cio_is_console(sch->schid)) goto out_unlock; @@ -1220,15 +1220,15 @@ static void io_subchannel_quiesce(struct subchannel *sch) ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, HZ/10); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); wait_event(cdev->private->wait_q, cdev->private->state != DEV_STATE_QUIESCE); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); } ret = cio_disable_subchannel(sch); } out_unlock: - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static void io_subchannel_shutdown(struct subchannel *sch) @@ -1315,23 +1315,34 @@ void ccw_device_schedule_recovery(void) spin_unlock_irqrestore(&recovery_lock, flags); } -static int purge_fn(struct device *dev, void *data) +static int purge_fn(struct subchannel *sch, void *data) { - struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_dev_id *id = &cdev->private->dev_id; - struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_device *cdev; - spin_lock_irq(cdev->ccwlock); - if (is_blacklisted(id->ssid, id->devno) && - (cdev->private->state == DEV_STATE_OFFLINE) && - (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { - CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, - id->devno); + spin_lock_irq(&sch->lock); + if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv) + goto unlock; + + if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) + goto unlock; + + cdev = sch_get_cdev(sch); + if (cdev) { + if (cdev->private->state != DEV_STATE_OFFLINE) + goto unlock; + + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) + goto unlock; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - css_sched_sch_todo(sch, SCH_TODO_UNREG); atomic_set(&cdev->private->onoff, 0); } - spin_unlock_irq(cdev->ccwlock); + + css_sched_sch_todo(sch, SCH_TODO_UNREG); + CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid, + sch->schib.pmcw.dev, cdev ? "" : " (no cdev)"); + +unlock: + spin_unlock_irq(&sch->lock); /* Abort loop in case of pending signal. 
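purge_fn() now visits subchannels instead of ccw devices, which lets it also unregister blacklisted subchannels that never got a cdev; ccw_purge_blacklisted() switches to for_each_subchannel_staged() just below. A skeleton for such a callback, assuming (as the -EINTR use here implies) that a non-zero return stops the iteration:

	static int demo_sch_cb(struct subchannel *sch, void *data)
	{
		spin_lock_irq(&sch->lock);
		/* inspect sch->st, sch->schib.pmcw, sch_get_cdev(sch), ... */
		spin_unlock_irq(&sch->lock);

		return signal_pending(current) ? -EINTR : 0;
	}

	/* the second callback would handle unknown subchannels; unused here */
	for_each_subchannel_staged(demo_sch_cb, NULL, NULL);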
*/ if (signal_pending(current)) return -EINTR; @@ -1347,7 +1358,7 @@ static int purge_fn(struct device *dev, void *data) int ccw_purge_blacklisted(void) { CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); - bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); + for_each_subchannel_staged(purge_fn, NULL, NULL); return 0; } @@ -1384,14 +1395,18 @@ enum io_sch_action { IO_SCH_VERIFY, IO_SCH_DISC, IO_SCH_NOP, + IO_SCH_ORPH_CDEV, }; static enum io_sch_action sch_get_action(struct subchannel *sch) { struct ccw_device *cdev; + int rc; cdev = sch_get_cdev(sch); - if (cio_update_schib(sch)) { + rc = cio_update_schib(sch); + + if (rc == -ENODEV) { /* Not operational. */ if (!cdev) return IO_SCH_UNREG; @@ -1399,6 +1414,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } + + /* Avoid unregistering subchannels without working device. */ + if (rc == -EACCES) { + if (!cdev) + return IO_SCH_NOP; + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + return IO_SCH_UNREG_CDEV; + return IO_SCH_ORPH_CDEV; + } + /* Operational. */ if (!cdev) return IO_SCH_ATTACH; @@ -1439,7 +1464,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) enum io_sch_action action; int rc = -EAGAIN; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; if (work_pending(&sch->todo_work)) @@ -1468,6 +1493,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: ccw_device_set_disconnected(cdev); break; @@ -1492,13 +1518,14 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) default: break; } - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* All other actions require process context. */ if (!process) goto out; /* Handle attached ccw device. */ switch (action) { case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: /* Move ccw device to orphanage. */ rc = ccw_device_move_to_orph(cdev); @@ -1507,9 +1534,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) break; case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); sch_set_cdev(sch, NULL); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* Unregister ccw device. 
*/ ccw_device_unregister(cdev); break; @@ -1538,9 +1565,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) put_device(&cdev->dev); goto out; } - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); ccw_device_trigger_reprobe(cdev); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* Release reference from get_ccwdev_by_dev_id() */ put_device(&cdev->dev); break; @@ -1550,7 +1577,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) return 0; out_unlock: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); out: return rc; } @@ -1776,7 +1803,7 @@ static void ccw_device_shutdown(struct device *dev) __disable_cmf(cdev); } -static struct bus_type ccw_bus_type = { +static const struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, .uevent = ccw_uevent, @@ -1846,9 +1873,9 @@ static void ccw_device_todo(struct work_struct *work) css_schedule_eval(sch->schid); fallthrough; case CDEV_TODO_UNREG: - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch_set_cdev(sch, NULL); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); ccw_device_unregister(cdev); break; default: diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c396ac3e3a32..ab419d40a8a7 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -7,6 +7,7 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> @@ -64,13 +65,13 @@ static void ccw_timeout_log(struct ccw_device *cdev) printk(KERN_WARNING "cio: orb indicates transport mode\n"); printk(KERN_WARNING "cio: last tcw:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - phys_to_virt(orb->tm.tcw), + dma32_to_virt(orb->tm.tcw), sizeof(struct tcw), 0); } else { printk(KERN_WARNING "cio: orb indicates command mode\n"); - if ((void *)(addr_t)orb->cmd.cpa == + if (dma32_to_virt(orb->cmd.cpa) == &private->dma_area->sense_ccw || - (void *)(addr_t)orb->cmd.cpa == + dma32_to_virt(orb->cmd.cpa) == cdev->private->dma_area->iccws) printk(KERN_WARNING "cio: last channel program " "(intern):\n"); @@ -78,7 +79,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) printk(KERN_WARNING "cio: last channel program:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - phys_to_virt(orb->cmd.cpa), + dma32_to_virt(orb->cmd.cpa), sizeof(struct ccw1), 0); } printk(KERN_WARNING "cio: ccw device state: %d\n", @@ -98,7 +99,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) void ccw_device_timeout(struct timer_list *t) { - struct ccw_device_private *priv = from_timer(priv, t, timer); + struct ccw_device_private *priv = timer_container_of(priv, t, timer); struct ccw_device *cdev = priv->cdev; spin_lock_irq(cdev->ccwlock); @@ -115,7 +116,7 @@ void ccw_device_set_timeout(struct ccw_device *cdev, int expires) { if (expires == 0) - del_timer(&cdev->private->timer); + timer_delete(&cdev->private->timer); else mod_timer(&cdev->private->timer, jiffies + expires); } @@ -504,6 +505,11 @@ callback: ccw_device_done(cdev, DEV_STATE_ONLINE); /* Deliver fake irb to device driver, if needed. 
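The from_timer()/del_timer() to timer_container_of()/timer_delete() renames in device_fsm.c are one-for-one; the timer life cycle is unchanged. Minimal sketch with the new names (structure is hypothetical):

	struct demo_priv {
		struct timer_list timer;
		/* ... */
	};

	static void demo_timeout(struct timer_list *t)
	{
		struct demo_priv *priv = timer_container_of(priv, t, timer);

		/* handle expiry; priv was recovered container_of()-style */
	}

	timer_setup(&priv->timer, demo_timeout, 0);
	mod_timer(&priv->timer, jiffies + HZ);	/* (re)arm */
	timer_delete(&priv->timer);		/* disarm */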
*/ if (cdev->private->flags.fake_irb) { + CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, + cdev->private->intparm, + cdev->private->flags.fake_irb); create_fake_irb(&cdev->private->dma_area->irb, cdev->private->flags.fake_irb); cdev->private->flags.fake_irb = 0; diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index ce99ee2457e6..d0f65d97dd4a 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/errno.h> +#include <asm/machine.h> #include <asm/ccwdev.h> #include <asm/setup.h> #include <asm/cio.h> @@ -175,7 +176,7 @@ static void snsid_callback(struct ccw_device *cdev, void *data, int rc) struct senseid *senseid = &cdev->private->dma_area->senseid; int vm = 0; - if (rc && MACHINE_IS_VM) { + if (rc && machine_is_vm()) { /* Try diag 0x210 fallback on z/VM. */ snsid_init(cdev); if (diag210_get_dev_info(cdev) == 0) { @@ -210,7 +211,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev) snsid_init(cdev); /* Channel program setup. */ cp->cmd_code = CCW_CMD_SENSE_ID; - cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid); + cp->cda = virt_to_dma32(&cdev->private->dma_area->senseid); cp->count = sizeof(struct senseid); cp->flags = CCW_FLAG_SLI; /* Request setup. */ diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index c533d1dadc6b..61c07b4a0fe8 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -202,11 +202,16 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; - if (cdev->private->state == DEV_STATE_VERIFY) { + if (cdev->private->state == DEV_STATE_VERIFY || + cdev->private->flags.doverify) { /* Remember to fake irb when finished. */ if (!cdev->private->flags.fake_irb) { cdev->private->flags.fake_irb = FAKE_CMD_IRB; cdev->private->intparm = intparm; + CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, intparm, + cdev->private->flags.fake_irb); return 0; } else /* There's already a fake I/O around. */ @@ -214,8 +219,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, } if (cdev->private->state != DEV_STATE_ONLINE || ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || - cdev->private->flags.doverify) + !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS))) return -EBUSY; ret = cio_set_options (sch, flags); if (ret) @@ -441,7 +445,7 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) return NULL; for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++) if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct) - return cdev->private->dma_area->senseid.ciw + ciw_cnt; + return &cdev->private->dma_area->senseid.ciw[ciw_cnt]; return NULL; } @@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, if (!cdev->private->flags.fake_irb) { cdev->private->flags.fake_irb = FAKE_TM_IRB; cdev->private->intparm = intparm; + CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, intparm, + cdev->private->flags.fake_irb); return 0; } else /* There's already a fake I/O around. 
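The virt_to_dma32() conversions in device_id.c (and in device_pgid.c below) follow from the typed channel-program addresses: a CCW's cda field is now dma32_t rather than a raw u32, so assignments go through the typed helper instead of a (u32)virt_to_phys() cast, and the compiler can flag mixups. The resulting setup shape (command and buffer are placeholders):

	struct ccw1 *cp = ...;	/* CCW in 31-bit addressable memory */

	cp->cmd_code = CCW_CMD_SENSE_ID;
	cp->cda = virt_to_dma32(buffer);	/* typed 31-bit data address */
	cp->count = buffer_len;
	cp->flags = CCW_FLAG_SLI;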
*/ @@ -823,13 +831,14 @@ EXPORT_SYMBOL_GPL(ccw_device_get_chid); * the subchannels dma pool. Maximal size of allocation supported * is PAGE_SIZE. */ -void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size) +void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size, + dma32_t *dma_handle) { void *addr; if (!get_device(&cdev->dev)) return NULL; - addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size); + addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle); if (IS_ERR_OR_NULL(addr)) put_device(&cdev->dev); return addr; diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 3862961697eb..b3afe283cc10 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -141,7 +141,7 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn) pgid->inf.fc = fn; cp->cmd_code = CCW_CMD_SET_PGID; - cp->cda = (u32)virt_to_phys(pgid); + cp->cda = virt_to_dma32(pgid); cp->count = sizeof(*pgid); cp->flags = CCW_FLAG_SLI; req->cp = cp; @@ -442,7 +442,7 @@ static void snid_build_cp(struct ccw_device *cdev) /* Channel program setup. */ cp->cmd_code = CCW_CMD_SENSE_PGID; - cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]); + cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]); cp->count = sizeof(struct pgid); cp->flags = CCW_FLAG_SLI; req->cp = cp; @@ -632,11 +632,11 @@ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2) struct ccw1 *cp = cdev->private->dma_area->iccws; cp[0].cmd_code = CCW_CMD_STLCK; - cp[0].cda = (u32)virt_to_phys(buf1); + cp[0].cda = virt_to_dma32(buf1); cp[0].count = 32; cp[0].flags = CCW_FLAG_CC; cp[1].cmd_code = CCW_CMD_RELEASE; - cp[1].cda = (u32)virt_to_phys(buf2); + cp[1].cda = virt_to_dma32(buf2); cp[1].count = 32; cp[1].flags = 0; req->cp = cp; @@ -698,29 +698,29 @@ int ccw_device_stlck(struct ccw_device *cdev) return -ENOMEM; init_completion(&data.done); data.rc = -EIO; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); rc = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); if (rc) goto out_unlock; /* Perform operation. */ cdev->private->state = DEV_STATE_STEAL_LOCK; ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); /* Wait for operation to finish. */ if (wait_for_completion_interruptible(&data.done)) { /* Got a signal. */ - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); ccw_request_cancel(cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); wait_for_completion(&data.done); } rc = data.rc; /* Check results. 
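With the dma_handle argument added above, a ccw_device_dma_zalloc() caller gets the 31-bit bus address together with the buffer and can drop its own translation step. A hedged usage sketch (the surrounding CCW setup is hypothetical):

	dma32_t dma;
	void *buf;

	buf = ccw_device_dma_zalloc(cdev, sizeof(struct senseid), &dma);
	if (IS_ERR_OR_NULL(buf))
		return -ENOMEM;
	cp->cda = dma;	/* already a dma32_t; no virt_to_dma32() needed */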
*/ - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); cio_disable_subchannel(sch); cdev->private->state = DEV_STATE_BOXED; out_unlock: - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); kfree(buffer); return rc; diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 6c2e35065fec..f4096373c8c0 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -42,7 +42,7 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); - sprintf(dbf_text, "chk%x", sch->schid.sch_no); + scnprintf(dbf_text, sizeof(dbf_text), "chk%x", sch->schid.sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } @@ -332,7 +332,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) */ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw; sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE; - sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw); + sense_ccw->cda = virt_to_dma32(cdev->private->dma_area->irb.ecw); sense_ccw->count = SENSE_MAX_COUNT; sense_ccw->flags = CCW_FLAG_SLI; diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 826364d2facd..d60f7d80863a 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -11,6 +11,7 @@ #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/device.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/slab.h> @@ -63,7 +64,7 @@ static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob) int cc; orb_init(orb); - orb->eadm.aob = (u32)virt_to_phys(aob); + orb->eadm.aob = virt_to_dma32(aob); orb->eadm.intparm = (u32)virt_to_phys(sch); orb->eadm.key = PAGE_DEFAULT_KEY >> 4; @@ -98,15 +99,15 @@ static int eadm_subchannel_clear(struct subchannel *sch) static void eadm_subchannel_timeout(struct timer_list *t) { - struct eadm_private *private = from_timer(private, t, timer); + struct eadm_private *private = timer_container_of(private, t, timer); struct subchannel *sch = private->sch; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); EADM_LOG(1, "timeout"); EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid)); if (eadm_subchannel_clear(sch)) EADM_LOG(0, "clear failed"); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) @@ -114,7 +115,7 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) struct eadm_private *private = get_eadm_private(sch); if (expires == 0) - del_timer(&private->timer); + timer_delete(&private->timer); else mod_timer(&private->timer, jiffies + expires); } @@ -147,7 +148,7 @@ static void eadm_subchannel_irq(struct subchannel *sch) css_sched_sch_todo(sch, SCH_TODO_EVAL); return; } - scm_irq_handler(phys_to_virt(scsw->aob), error); + scm_irq_handler(dma32_to_virt(scsw->aob), error); private->state = EADM_IDLE; if (private->completion) @@ -163,16 +164,16 @@ static struct subchannel *eadm_get_idle_sch(void) spin_lock_irqsave(&list_lock, flags); list_for_each_entry(private, &eadm_list, head) { sch = private->sch; - spin_lock(sch->lock); + spin_lock(&sch->lock); if (private->state == EADM_IDLE) { private->state = EADM_BUSY; list_move_tail(&private->head, &eadm_list); - spin_unlock(sch->lock); + spin_unlock(&sch->lock); spin_unlock_irqrestore(&list_lock, flags); return sch; } - spin_unlock(sch->lock); + 
spin_unlock(&sch->lock); } spin_unlock_irqrestore(&list_lock, flags); @@ -190,7 +191,7 @@ int eadm_start_aob(struct aob *aob) if (!sch) return -EBUSY; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); eadm_subchannel_set_timeout(sch, EADM_TIMEOUT); ret = eadm_subchannel_start(sch, aob); if (!ret) @@ -203,7 +204,7 @@ int eadm_start_aob(struct aob *aob) css_sched_sch_todo(sch, SCH_TODO_EVAL); out_unlock: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return ret; } @@ -221,7 +222,7 @@ static int eadm_subchannel_probe(struct subchannel *sch) INIT_LIST_HEAD(&private->head); timer_setup(&private->timer, eadm_subchannel_timeout, 0); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); set_eadm_private(sch, private); private->state = EADM_IDLE; private->sch = sch; @@ -229,11 +230,11 @@ static int eadm_subchannel_probe(struct subchannel *sch) ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); if (ret) { set_eadm_private(sch, NULL); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); kfree(private); goto out; } - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); spin_lock_irq(&list_lock); list_add(&private->head, &eadm_list); @@ -248,7 +249,7 @@ static void eadm_quiesce(struct subchannel *sch) DECLARE_COMPLETION_ONSTACK(completion); int ret; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); if (private->state != EADM_BUSY) goto disable; @@ -256,11 +257,11 @@ static void eadm_quiesce(struct subchannel *sch) goto disable; private->completion = &completion; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); wait_for_completion_io(&completion); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); private->completion = NULL; disable: @@ -269,7 +270,7 @@ disable: ret = cio_disable_subchannel(sch); } while (ret == -EBUSY); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static void eadm_subchannel_remove(struct subchannel *sch) @@ -282,9 +283,9 @@ static void eadm_subchannel_remove(struct subchannel *sch) eadm_quiesce(sch); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); set_eadm_private(sch, NULL); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); kfree(private); } @@ -309,7 +310,7 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process) struct eadm_private *private; unsigned long flags; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; @@ -325,7 +326,7 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process) private->state = EADM_IDLE; out_unlock: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return 0; } diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c index 84f24a2f46e4..533465ae6038 100644 --- a/drivers/s390/cio/fcx.c +++ b/drivers/s390/cio/fcx.c @@ -6,6 +6,7 @@ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ +#include <linux/export.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> @@ -25,7 +26,7 @@ */ struct tcw *tcw_get_intrg(struct tcw *tcw) { - return phys_to_virt(tcw->intrg); + return dma32_to_virt(tcw->intrg); } EXPORT_SYMBOL(tcw_get_intrg); @@ -40,9 +41,9 @@ EXPORT_SYMBOL(tcw_get_intrg); void *tcw_get_data(struct tcw *tcw) { if (tcw->r) - return phys_to_virt(tcw->input); + return dma64_to_virt(tcw->input); if (tcw->w) - return phys_to_virt(tcw->output); + return dma64_to_virt(tcw->output); return NULL; } 
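The fcx.c accessors now pair up around typed helpers: the tcw_set_*() functions store virt_to_dma64()/virt_to_dma32() values and the tcw_get_*() functions recover CPU pointers via dma64_to_virt()/dma32_to_virt(), keeping raw integer casts out of callers. Round-trip sketch:

	struct tcw *tcw = ...;
	void *data = ...;
	void *p;

	tcw_init(tcw, 1, 0);		/* input (read) transfer */
	tcw_set_data(tcw, data, 0);	/* stores virt_to_dma64(data) in tcw->input */

	/* later, e.g. when inspecting a completed TCW: */
	p = tcw_get_data(tcw);		/* dma64_to_virt() back to a CPU pointer */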
EXPORT_SYMBOL(tcw_get_data); @@ -55,7 +56,7 @@ EXPORT_SYMBOL(tcw_get_data); */ struct tccb *tcw_get_tccb(struct tcw *tcw) { - return phys_to_virt(tcw->tccb); + return dma64_to_virt(tcw->tccb); } EXPORT_SYMBOL(tcw_get_tccb); @@ -67,7 +68,7 @@ EXPORT_SYMBOL(tcw_get_tccb); */ struct tsb *tcw_get_tsb(struct tcw *tcw) { - return phys_to_virt(tcw->tsb); + return dma64_to_virt(tcw->tsb); } EXPORT_SYMBOL(tcw_get_tsb); @@ -190,7 +191,7 @@ EXPORT_SYMBOL(tcw_finalize); */ void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) { - tcw->intrg = (u32)virt_to_phys(intrg_tcw); + tcw->intrg = virt_to_dma32(intrg_tcw); } EXPORT_SYMBOL(tcw_set_intrg); @@ -208,11 +209,11 @@ EXPORT_SYMBOL(tcw_set_intrg); void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) { if (tcw->r) { - tcw->input = virt_to_phys(data); + tcw->input = virt_to_dma64(data); if (use_tidal) tcw->flags |= TCW_FLAGS_INPUT_TIDA; } else if (tcw->w) { - tcw->output = virt_to_phys(data); + tcw->output = virt_to_dma64(data); if (use_tidal) tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; } @@ -228,7 +229,7 @@ EXPORT_SYMBOL(tcw_set_data); */ void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) { - tcw->tccb = virt_to_phys(tccb); + tcw->tccb = virt_to_dma64(tccb); } EXPORT_SYMBOL(tcw_set_tccb); @@ -241,7 +242,7 @@ EXPORT_SYMBOL(tcw_set_tccb); */ void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) { - tcw->tsb = virt_to_phys(tsb); + tcw->tsb = virt_to_dma64(tsb); } EXPORT_SYMBOL(tcw_set_tsb); @@ -346,7 +347,7 @@ struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, memset(tidaw, 0, sizeof(struct tidaw)); tidaw->flags = flags; tidaw->count = count; - tidaw->addr = virt_to_phys(addr); + tidaw->addr = virt_to_dma64(addr); return tidaw; } EXPORT_SYMBOL(tcw_add_tidaw); diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 45f9c0736be4..e5f28370a903 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -16,20 +16,21 @@ struct idset { unsigned long bitmap[]; }; -static inline unsigned long bitmap_size(int num_ssid, int num_id) +static inline unsigned long idset_bitmap_size(int num_ssid, int num_id) { - return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); + return bitmap_size(size_mul(num_ssid, num_id)); } static struct idset *idset_new(int num_ssid, int num_id) { struct idset *set; - set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); + set = vmalloc(sizeof(struct idset) + + idset_bitmap_size(num_ssid, num_id)); if (set) { set->num_ssid = num_ssid; set->num_id = num_id; - memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); + memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id)); } return set; } @@ -41,7 +42,8 @@ void idset_free(struct idset *set) void idset_fill(struct idset *set) { - memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); + memset(set->bitmap, 0xff, + idset_bitmap_size(set->num_ssid, set->num_id)); } static inline void idset_add(struct idset *set, int ssid, int id) diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index acf1edd36549..8b06b234e110 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -8,6 +8,7 @@ #include <asm/asm-extable.h> #include <asm/chpid.h> #include <asm/schid.h> +#include <asm/asm.h> #include <asm/crw.h> #include "ioasm.h" @@ -18,19 +19,20 @@ static inline int __stsch(struct subchannel_id schid, struct schib *addr) { unsigned long r1 = *(unsigned int *)&schid; - int ccode = -EIO; + int ccode, exception; - asm volatile( + exception = 1; + asm_inline volatile( " lgr 1,%[r1]\n" " stsch 
%[addr]\n" - "0: ipm %[cc]\n" - " srl %[cc],28\n" + "0: lhi %[exc],0\n" "1:\n" + CC_IPM(cc) EX_TABLE(0b, 1b) - : [cc] "+&d" (ccode), [addr] "=Q" (*addr) + : CC_OUT(cc, ccode), [addr] "=Q" (*addr), [exc] "+d" (exception) : [r1] "d" (r1) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return exception ? -EIO : CC_TRANSFORM(ccode); } int stsch(struct subchannel_id schid, struct schib *addr) @@ -47,19 +49,20 @@ EXPORT_SYMBOL(stsch); static inline int __msch(struct subchannel_id schid, struct schib *addr) { unsigned long r1 = *(unsigned int *)&schid; - int ccode = -EIO; + int ccode, exception; - asm volatile( + exception = 1; + asm_inline volatile( " lgr 1,%[r1]\n" " msch %[addr]\n" - "0: ipm %[cc]\n" - " srl %[cc],28\n" + "0: lhi %[exc],0\n" "1:\n" + CC_IPM(cc) EX_TABLE(0b, 1b) - : [cc] "+&d" (ccode) + : CC_OUT(cc, ccode), [exc] "+d" (exception) : [r1] "d" (r1), [addr] "Q" (*addr) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return exception ? -EIO : CC_TRANSFORM(ccode); } int msch(struct subchannel_id schid, struct schib *addr) @@ -80,12 +83,11 @@ static inline int __tsch(struct subchannel_id schid, struct irb *addr) asm volatile( " lgr 1,%[r1]\n" " tsch %[addr]\n" - " ipm %[cc]\n" - " srl %[cc],28" - : [cc] "=&d" (ccode), [addr] "=Q" (*addr) + CC_IPM(cc) + : CC_OUT(cc, ccode), [addr] "=Q" (*addr) : [r1] "d" (r1) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return CC_TRANSFORM(ccode); } int tsch(struct subchannel_id schid, struct irb *addr) @@ -101,19 +103,20 @@ int tsch(struct subchannel_id schid, struct irb *addr) static inline int __ssch(struct subchannel_id schid, union orb *addr) { unsigned long r1 = *(unsigned int *)&schid; - int ccode = -EIO; + int ccode, exception; - asm volatile( + exception = 1; + asm_inline volatile( " lgr 1,%[r1]\n" " ssch %[addr]\n" - "0: ipm %[cc]\n" - " srl %[cc],28\n" + "0: lhi %[exc],0\n" "1:\n" + CC_IPM(cc) EX_TABLE(0b, 1b) - : [cc] "+&d" (ccode) + : CC_OUT(cc, ccode), [exc] "+d" (exception) : [r1] "d" (r1), [addr] "Q" (*addr) - : "cc", "memory", "1"); - return ccode; + : CC_CLOBBER_LIST("memory", "1")); + return CC_TRANSFORM(ccode); } int ssch(struct subchannel_id schid, union orb *addr) @@ -135,12 +138,11 @@ static inline int __csch(struct subchannel_id schid) asm volatile( " lgr 1,%[r1]\n" " csch\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (ccode) + CC_IPM(cc) + : CC_OUT(cc, ccode) : [r1] "d" (r1) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return CC_TRANSFORM(ccode); } int csch(struct subchannel_id schid) @@ -160,11 +162,11 @@ int tpi(struct tpi_info *addr) asm volatile( " tpi %[addr]\n" - " ipm %[cc]\n" - " srl %[cc],28" - : [cc] "=&d" (ccode), [addr] "=Q" (*addr) + CC_IPM(cc) + : CC_OUT(cc, ccode), [addr] "=Q" (*addr) : - : "cc"); + : CC_CLOBBER); + ccode = CC_TRANSFORM(ccode); trace_s390_cio_tpi(addr, ccode); return ccode; @@ -173,17 +175,19 @@ int tpi(struct tpi_info *addr) int chsc(void *chsc_area) { typedef struct { char _[4096]; } addr_type; - int cc = -EIO; + int cc, exception; - asm volatile( + exception = 1; + asm_inline volatile( " .insn rre,0xb25f0000,%[chsc_area],0\n" - "0: ipm %[cc]\n" - " srl %[cc],28\n" + "0: lhi %[exc],0\n" "1:\n" + CC_IPM(cc) EX_TABLE(0b, 1b) - : [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area) + : CC_OUT(cc, cc), "+m" (*(addr_type *)chsc_area), [exc] "+d" (exception) : [chsc_area] "d" (chsc_area) - : "cc"); + : CC_CLOBBER); + cc = exception ? 
-EIO : CC_TRANSFORM(cc); trace_s390_cio_chsc(chsc_area, cc); return cc; @@ -198,12 +202,11 @@ static inline int __rsch(struct subchannel_id schid) asm volatile( " lgr 1,%[r1]\n" " rsch\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (ccode) + CC_IPM(cc) + : CC_OUT(cc, ccode) : [r1] "d" (r1) - : "cc", "memory", "1"); - return ccode; + : CC_CLOBBER_LIST("memory", "1")); + return CC_TRANSFORM(ccode); } int rsch(struct subchannel_id schid) @@ -224,12 +227,11 @@ static inline int __hsch(struct subchannel_id schid) asm volatile( " lgr 1,%[r1]\n" " hsch\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (ccode) + CC_IPM(cc) + : CC_OUT(cc, ccode) : [r1] "d" (r1) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return CC_TRANSFORM(ccode); } int hsch(struct subchannel_id schid) @@ -251,12 +253,11 @@ static inline int __xsch(struct subchannel_id schid) asm volatile( " lgr 1,%[r1]\n" " xsch\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (ccode) + CC_IPM(cc) + : CC_OUT(cc, ccode) : [r1] "d" (r1) - : "cc", "1"); - return ccode; + : CC_CLOBBER_LIST("1")); + return CC_TRANSFORM(ccode); } int xsch(struct subchannel_id schid) @@ -275,12 +276,11 @@ static inline int __stcrw(struct crw *crw) asm volatile( " stcrw %[crw]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (ccode), [crw] "=Q" (*crw) + CC_IPM(cc) + : CC_OUT(cc, ccode), [crw] "=Q" (*crw) : - : "cc"); - return ccode; + : CC_CLOBBER); + return CC_TRANSFORM(ccode); } static inline int _stcrw(struct crw *crw) diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c index 77fde9f5ea8b..7dda7f1fc6e4 100644 --- a/drivers/s390/cio/isc.c +++ b/drivers/s390/cio/isc.c @@ -7,6 +7,7 @@ */ #include <linux/spinlock.h> +#include <linux/export.h> #include <linux/module.h> #include <asm/isc.h> @@ -33,7 +34,7 @@ void isc_register(unsigned int isc) spin_lock(&isc_ref_lock); if (isc_refs[isc] == 0) - ctl_set_bit(6, 31 - isc); + system_ctl_set_bit(6, 31 - isc); isc_refs[isc]++; spin_unlock(&isc_ref_lock); } @@ -61,7 +62,7 @@ void isc_unregister(unsigned int isc) goto out_unlock; } if (isc_refs[isc] == 1) - ctl_clear_bit(6, 31 - isc); + system_ctl_clear_bit(6, 31 - isc); isc_refs[isc]--; out_unlock: spin_unlock(&isc_ref_lock); diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c index dbd3099c520e..b6408a475983 100644 --- a/drivers/s390/cio/itcw.c +++ b/drivers/s390/cio/itcw.c @@ -6,6 +6,7 @@ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ +#include <linux/export.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h index a2d3778b2c95..14d2a1822b50 100644 --- a/drivers/s390/cio/orb.h +++ b/drivers/s390/cio/orb.h @@ -12,6 +12,9 @@ #ifndef S390_ORB_H #define S390_ORB_H +#include <linux/types.h> +#include <asm/dma-types.h> + /* * Command-mode operation request block */ @@ -34,7 +37,7 @@ struct cmd_orb { u32 ils:1; /* incorrect length */ u32 zero:6; /* reserved zeros */ u32 orbx:1; /* ORB extension control */ - u32 cpa; /* channel program address */ + dma32_t cpa; /* channel program address */ } __packed __aligned(4); /* @@ -49,7 +52,7 @@ struct tm_orb { u32 lpm:8; u32:7; u32 x:1; - u32 tcw; + dma32_t tcw; u32 prio:8; u32:8; u32 rsvpgm:8; @@ -71,7 +74,7 @@ struct eadm_orb { u32 compat2:1; u32:21; u32 x:1; - u32 aob; + dma32_t aob; u32 css_prio:8; u32:8; u32 scm_prio:8; diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 641f0dbb65a9..4bd4c00c9c0c 100644 --- a/drivers/s390/cio/qdio.h +++ 
b/drivers/s390/cio/qdio.h @@ -210,11 +210,10 @@ struct qdio_q { qdio_handler_t (*handler); struct qdio_irq *irq_ptr; + + /* memory page (PAGE_SIZE) used to place slib and sl on */ + void *sl_page; struct sl *sl; - /* - * A page is allocated under this pointer and used for slib and sl. - * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2. - */ struct slib *slib; } __attribute__ ((aligned(256))); @@ -266,7 +265,7 @@ struct qdio_irq { #define is_thinint_irq(irq) \ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ - css_general_characteristics.aif_osa) + css_general_characteristics.aif_qdio) #define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 1a9714af51e4..4fb900f2d3d9 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -7,7 +7,6 @@ #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/uaccess.h> -#include <linux/export.h> #include <linux/slab.h> #include <asm/debug.h> #include "qdio_debug.h" diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9cde55730b65..7dd967165025 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -7,6 +7,8 @@ * Jan Glauber <jang@linux.vnet.ibm.com> * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> */ + +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -17,6 +19,7 @@ #include <linux/atomic.h> #include <asm/debug.h> #include <asm/qdio.h> +#include <asm/asm.h> #include <asm/ipl.h> #include "cio.h" @@ -42,13 +45,12 @@ static inline int do_siga_sync(unsigned long schid, " lgr 2,%[out]\n" " lgr 3,%[in]\n" " siga 0\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (cc) + CC_IPM(cc) + : CC_OUT(cc, cc) : [fc] "d" (fc), [schid] "d" (schid), [out] "d" (out_mask), [in] "d" (in_mask) - : "cc", "0", "1", "2", "3"); - return cc; + : CC_CLOBBER_LIST("0", "1", "2", "3")); + return CC_TRANSFORM(cc); } static inline int do_siga_input(unsigned long schid, unsigned long mask, @@ -61,12 +63,11 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask, " lgr 1,%[schid]\n" " lgr 2,%[mask]\n" " siga 0\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (cc) + CC_IPM(cc) + : CC_OUT(cc, cc) : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask) - : "cc", "0", "1", "2"); - return cc; + : CC_CLOBBER_LIST("0", "1", "2")); + return CC_TRANSFORM(cc); } /** @@ -82,7 +83,7 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask, */ static inline int do_siga_output(unsigned long schid, unsigned long mask, unsigned int *bb, unsigned long fc, - unsigned long aob) + dma64_t aob) { int cc; @@ -93,13 +94,12 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, " lgr 3,%[aob]\n" " siga 0\n" " lgr %[fc],0\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (cc), [fc] "+&d" (fc) + CC_IPM(cc) + : CC_OUT(cc, cc), [fc] "+&d" (fc) : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob) - : "cc", "0", "1", "2", "3"); + : CC_CLOBBER_LIST("0", "1", "2", "3")); *bb = fc >> 31; - return cc; + return CC_TRANSFORM(cc); } /** @@ -321,7 +321,7 @@ static inline int qdio_siga_sync_q(struct qdio_q *q) } static int qdio_siga_output(struct qdio_q *q, unsigned int count, - unsigned int *busy_bit, unsigned long aob) + unsigned int *busy_bit, dma64_t aob) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; @@ -628,7 +628,7 @@ int qdio_inspect_output_queue(struct 
ccw_device *cdev, unsigned int nr, EXPORT_SYMBOL_GPL(qdio_inspect_output_queue); static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, - unsigned long aob) + dma64_t aob) { int retries = 0, cc; unsigned int busy_bit; @@ -695,7 +695,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) return; qdio_deliver_irq(irq_ptr); - irq_ptr->last_data_irq_time = S390_lowcore.int_clock; + irq_ptr->last_data_irq_time = get_lowcore()->int_clock; } static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, @@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, lgr_info_log(); } -static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, - int dstat) +static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, + int dstat, int dcc) { DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); @@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, goto error; if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) goto error; + if (dcc == 1) + return -EAGAIN; if (!(dstat & DEV_STAT_DEV_END)) goto error; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); - return; + return 0; error: DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + return -EIO; } /* qdio interrupt handler */ @@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct subchannel_id schid; - int cstat, dstat; + int cstat, dstat, rc, dcc; if (!intparm || !irq_ptr) { ccw_device_get_schid(cdev, &schid); @@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, qdio_irq_check_sense(irq_ptr, irb); cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; + dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0; + rc = 0; switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(irq_ptr, cstat, dstat); + rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc); break; case QDIO_IRQ_STATE_CLEANUP: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); @@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, if (cstat || dstat) qdio_handle_activate_check(irq_ptr, intparm, cstat, dstat); + else if (dcc == 1) + rc = -EAGAIN; break; case QDIO_IRQ_STATE_STOPPED: break; default: WARN_ON_ONCE(1); } + + if (rc == -EAGAIN) { + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry"); + rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0); + if (!rc) + return; + DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + } + wake_up(&cdev->private->wait_q); } @@ -1070,7 +1088,7 @@ int qdio_establish(struct ccw_device *cdev, irq_ptr->ccw->cmd_code = ciw->cmd; irq_ptr->ccw->flags = CCW_FLAG_SLI; irq_ptr->ccw->count = ciw->count; - irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr); + irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr); spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); @@ -1263,9 +1281,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co qperf_inc(q, outbound_queue_full); if (queue_type(q) == QDIO_IQDIO_QFMT) { - unsigned long phys_aob = aob ? virt_to_phys(aob) : 0; + dma64_t phys_aob = aob ? 
virt_to_dma64(aob) : 0; - WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256)); + WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256)); rc = qdio_kick_outbound_q(q, count, phys_aob); } else if (qdio_need_siga_sync(q->irq_ptr)) { rc = qdio_sync_output_queue(q); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 714878e2acc4..ea09aadaae4e 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -83,7 +83,7 @@ static void __qdio_free_queues(struct qdio_q **queues, unsigned int count) for (i = 0; i < count; i++) { q = queues[i]; - free_page((unsigned long) q->slib); + free_page((unsigned long)q->sl_page); kmem_cache_free(qdio_q_cache, q); } } @@ -109,12 +109,16 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) return -ENOMEM; } - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { + q->sl_page = (void *)__get_free_page(GFP_KERNEL); + if (!q->sl_page) { kmem_cache_free(qdio_q_cache, q); __qdio_free_queues(irq_ptr_qs, i); return -ENOMEM; } + q->slib = q->sl_page; + /* As per architecture: SLIB is 2K bytes long, and SL 1K. */ + q->sl = (struct sl *)(q->slib + 1); + irq_ptr_qs[i] = q; } return 0; @@ -142,11 +146,15 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, qdio_handler_t *handler, int i) { - struct slib *slib = q->slib; + struct slib *const slib = q->slib; + void *const sl_page = q->sl_page; + struct sl *const sl = q->sl; /* queue must be cleared for qdio_establish */ memset(q, 0, sizeof(*q)); - memset(slib, 0, PAGE_SIZE); + memset(sl_page, 0, PAGE_SIZE); + q->sl_page = sl_page; + q->sl = sl; q->slib = slib; q->irq_ptr = irq_ptr; q->mask = 1 << (31 - i); @@ -161,7 +169,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, int j; DBF_HEX(&q, sizeof(void *)); - q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); /* fill in sbal */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) @@ -179,7 +186,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, /* fill in sl */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); + q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]); } static void setup_queues(struct qdio_irq *irq_ptr, @@ -291,9 +298,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue) { - desc->sliba = virt_to_phys(queue->slib); - desc->sla = virt_to_phys(queue->sl); - desc->slsba = virt_to_phys(&queue->slsb); + desc->sliba = virt_to_dma64(queue->slib); + desc->sla = virt_to_dma64(queue->sl); + desc->slsba = virt_to_dma64(&queue->slsb); desc->akey = PAGE_DEFAULT_KEY >> 4; desc->bkey = PAGE_DEFAULT_KEY >> 4; @@ -315,7 +322,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; - irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib); + irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib); irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; for (i = 0; i < qdio_init->no_input_qs; i++) @@ -423,7 +430,7 @@ int __init qdio_setup_init(void) /* Check for OSA/FCP thin interrupts (bit 67). */ DBF_EVENT("thinint:%1d", - (css_general_characteristics.aif_osa) ? 1 : 0); + (css_general_characteristics.aif_qdio) ? 1 : 0); /* Check for QEBSM support in general (bit 58). 
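The qdio_setup.c changes above make the single-page layout explicit instead of encoding it in a comment and a PAGE_SIZE / 2 offset: one page backs both structures, with the SLIB at the page start (2K long per the architecture) and the SL placed directly behind it. As a sketch:

	q->sl_page = (void *)__get_free_page(GFP_KERNEL);
	if (!q->sl_page)
		return -ENOMEM;
	q->slib = q->sl_page;			/* SLIB at the page start */
	q->sl = (struct sl *)(q->slib + 1);	/* +1 advances by sizeof(struct slib), i.e. 2K */
	/* ... and on teardown: */
	free_page((unsigned long)q->sl_page);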
*/ DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm); diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 9b9335dd06db..f931954910c5 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -99,7 +99,7 @@ static inline u32 clear_shared_ind(void) static void tiqdio_thinint_handler(struct airq_struct *airq, struct tpi_info *tpi_info) { - u64 irq_time = S390_lowcore.int_clock; + u64 irq_time = get_lowcore()->int_clock; u32 si_used = clear_shared_ind(); struct qdio_irq *irq; @@ -137,15 +137,15 @@ static struct airq_struct tiqdio_airq = { static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; - u64 summary_indicator_addr, subchannel_indicator_addr; + dma64_t summary_indicator_addr, subchannel_indicator_addr; int rc; if (reset) { summary_indicator_addr = 0; subchannel_indicator_addr = 0; } else { - summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); - subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci); + summary_indicator_addr = virt_to_dma64(tiqdio_airq.lsi_ptr); + subchannel_indicator_addr = virt_to_dma64(irq_ptr->dsci); } rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr, diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 6b21ba68c1fe..9b4da237a0ed 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -7,6 +7,7 @@ */ #include <linux/device.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -42,7 +43,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env) return add_uevent_var(env, "MODALIAS=scm:scmdev"); } -static struct bus_type scm_bus_type = { +static const struct bus_type scm_bus_type = { .name = "scm", .probe = scmdev_probe, .remove = scmdev_remove, @@ -91,7 +92,7 @@ static ssize_t show_##name(struct device *dev, \ int ret; \ \ device_lock(dev); \ - ret = sprintf(buf, "%u\n", scmdev->attrs.name); \ + ret = sysfs_emit(buf, "%u\n", scmdev->attrs.name); \ device_unlock(dev); \ \ return ret; \ @@ -228,7 +229,7 @@ int scm_update_information(void) size_t num; int ret; - scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); + scm_info = (void *)__get_free_page(GFP_KERNEL); if (!scm_info) return -ENOMEM; diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h index 86993de25345..6bca5315ee2a 100644 --- a/drivers/s390/cio/trace.h +++ b/drivers/s390/cio/trace.h @@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib, __entry->devno = schib->pmcw.dev; __entry->schib = *schib; __entry->pmcw_ena = schib->pmcw.ena; - __entry->pmcw_st = schib->pmcw.ena; + __entry->pmcw_st = schib->pmcw.st; __entry->pmcw_dnv = schib->pmcw.dnv; __entry->pmcw_dev = schib->pmcw.dev; __entry->pmcw_lpm = schib->pmcw.lpm; @@ -169,7 +169,7 @@ TRACE_EVENT(s390_cio_tpi, else if (addr) __entry->tpi_info = *addr; else - __entry->tpi_info = S390_lowcore.tpi_info; + __entry->tpi_info = get_lowcore()->tpi_info; __entry->cssid = __entry->tpi_info.schid.cssid; __entry->ssid = __entry->tpi_info.schid.ssid; __entry->schno = __entry->tpi_info.schid.sch_no; diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c index d3f3a611f95b..38c176cf6295 100644 --- a/drivers/s390/cio/vfio_ccw_chp.c +++ b/drivers/s390/cio/vfio_ccw_chp.c @@ -115,7 +115,7 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private, /* Notify the guest if more CRWs are on our queue */ if (!list_empty(&private->crw) && 
private->crw_trigger) - eventfd_signal(private->crw_trigger, 1); + eventfd_signal(private->crw_trigger); return ret; } diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index aafd66305ead..5f6e10225627 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -190,7 +190,7 @@ static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length) } /* Create the list of IDAL words for a page_array. */ static inline void page_array_idal_create_words(struct page_array *pa, - unsigned long *idaws) + dma64_t *idaws) { int i; @@ -203,10 +203,10 @@ static inline void page_array_idal_create_words(struct page_array *pa, */ for (i = 0; i < pa->pa_nr; i++) { - idaws[i] = page_to_phys(pa->pa_page[i]); + idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i])); /* Incorporate any offset from each starting address */ - idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1); + idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK); } } @@ -227,7 +227,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len) pccw1->flags = ccw0.flags; pccw1->count = ccw0.count; } - pccw1->cda = ccw0.cda; + pccw1->cda = u32_to_dma32(ccw0.cda); pccw1++; } } @@ -299,11 +299,12 @@ static inline int ccw_does_data_transfer(struct ccw1 *ccw) * * Returns 1 if yes, 0 if no. */ -static inline int is_cpa_within_range(u32 cpa, u32 head, int len) +static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len) { u32 tail = head + (len - 1) * sizeof(struct ccw1); + u32 gcpa = dma32_to_u32(cpa); - return (head <= cpa && cpa <= tail); + return head <= gcpa && gcpa <= tail; } static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len) @@ -356,7 +357,7 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) if (ccw_is_tic(ccw)) return; - kfree(phys_to_virt(ccw->cda)); + kfree(dma32_to_virt(ccw->cda)); } /** @@ -417,15 +418,17 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp) static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp); -static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) +static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp) { struct vfio_device *vdev = &container_of(cp, struct vfio_ccw_private, cp)->vdev; struct ccwchain *chain; int len, ret; + u32 gcda; + gcda = dma32_to_u32(cda); /* Copy 2K (the most we support today) of possible CCWs */ - ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false); + ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false); if (ret) return ret; @@ -434,7 +437,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX); /* Count the CCWs in the current chain */ - len = ccwchain_calc_length(cda, cp); + len = ccwchain_calc_length(gcda, cp); if (len < 0) return len; @@ -444,7 +447,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) return -ENOMEM; chain->ch_len = len; - chain->ch_iova = cda; + chain->ch_iova = gcda; /* Copy the actual CCWs into the new chain */ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); @@ -487,13 +490,14 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw, struct channel_program *cp) { struct ccwchain *iter; - u32 ccw_head; + u32 offset, ccw_head; list_for_each_entry(iter, &cp->ccwchain_list, next) { ccw_head = iter->ch_iova; if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) { - ccw->cda = (__u32) (addr_t) 
(((char *)iter->ch_ccw) + - (ccw->cda - ccw_head)); + /* Calculate offset of TIC target */ + offset = dma32_to_u32(ccw->cda) - ccw_head; + ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset); return 0; } } @@ -501,14 +505,12 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw, return -EFAULT; } -static unsigned long *get_guest_idal(struct ccw1 *ccw, - struct channel_program *cp, - int idaw_nr) +static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr) { struct vfio_device *vdev = &container_of(cp, struct vfio_ccw_private, cp)->vdev; - unsigned long *idaws; - unsigned int *idaws_f1; + dma64_t *idaws; + dma32_t *idaws_f1; int idal_len = idaw_nr * sizeof(*idaws); int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE; int idaw_mask = ~(idaw_size - 1); @@ -520,7 +522,7 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw, if (ccw_is_idal(ccw)) { /* Copy IDAL from guest */ - ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false); + ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false); if (ret) { kfree(idaws); return ERR_PTR(ret); @@ -528,14 +530,18 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw, } else { /* Fabricate an IDAL based off CCW data address */ if (cp->orb.cmd.c64) { - idaws[0] = ccw->cda; - for (i = 1; i < idaw_nr; i++) - idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask; + idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda)); + for (i = 1; i < idaw_nr; i++) { + idaws[i] = dma64_add(idaws[i - 1], idaw_size); + idaws[i] = dma64_and(idaws[i], idaw_mask); + } } else { - idaws_f1 = (unsigned int *)idaws; + idaws_f1 = (dma32_t *)idaws; idaws_f1[0] = ccw->cda; - for (i = 1; i < idaw_nr; i++) - idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask; + for (i = 1; i < idaw_nr; i++) { + idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size); + idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask); + } } } @@ -572,7 +578,7 @@ static int ccw_count_idaws(struct ccw1 *ccw, if (ccw_is_idal(ccw)) { /* Read first IDAW to check its starting address. */ /* All subsequent IDAWs will be 2K- or 4K-aligned. */ - ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false); + ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false); if (ret) return ret; @@ -583,7 +589,7 @@ static int ccw_count_idaws(struct ccw1 *ccw, if (!cp->orb.cmd.c64) iova = iova >> 32; } else { - iova = ccw->cda; + iova = dma32_to_u32(ccw->cda); } /* Format-1 IDAWs operate on 2K each */ @@ -604,8 +610,8 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw, { struct vfio_device *vdev = &container_of(cp, struct vfio_ccw_private, cp)->vdev; - unsigned long *idaws; - unsigned int *idaws_f1; + dma64_t *idaws; + dma32_t *idaws_f1; int ret; int idaw_nr; int i; @@ -636,12 +642,12 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw, * Copy guest IDAWs into page_array, in case the memory they * occupy is not contiguous. 
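To make the fabricated-IDAL arithmetic in get_guest_idal() above concrete, here is a hedged standalone sketch using plain u64 math instead of the dma64_t wrappers. With cda = 0x12345678 and 4K IDAWs it yields 0x12345678, 0x12346000, 0x12347000, and so on: only the first IDAW keeps the byte offset, all later ones are block aligned.

static void example_fabricate_idal(u64 *idaws, int idaw_nr,
				   u64 cda, unsigned int idaw_size)
{
	u64 idaw_mask = ~((u64)idaw_size - 1);
	int i;

	idaws[0] = cda;		/* keeps the offset into the first block */
	for (i = 1; i < idaw_nr; i++)
		idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
}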
*/ - idaws_f1 = (unsigned int *)idaws; + idaws_f1 = (dma32_t *)idaws; for (i = 0; i < idaw_nr; i++) { if (cp->orb.cmd.c64) - pa->pa_iova[i] = idaws[i]; + pa->pa_iova[i] = dma64_to_u64(idaws[i]); else - pa->pa_iova[i] = idaws_f1[i]; + pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]); } if (ccw_does_data_transfer(ccw)) { @@ -652,7 +658,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw, pa->pa_nr = 0; } - ccw->cda = (__u32) virt_to_phys(idaws); + ccw->cda = virt_to_dma32(idaws); ccw->flags |= CCW_FLAG_IDA; /* Populate the IDAL with pinned/translated addresses from page */ @@ -874,7 +880,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch) chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next); cpa = chain->ch_ccw; - orb->cmd.cpa = (__u32)virt_to_phys(cpa); + orb->cmd.cpa = virt_to_dma32(cpa); return orb; } @@ -896,7 +902,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch) void cp_update_scsw(struct channel_program *cp, union scsw *scsw) { struct ccwchain *chain; - u32 cpa = scsw->cmd.cpa; + dma32_t cpa = scsw->cmd.cpa; u32 ccw_head; if (!cp->initialized) @@ -909,7 +915,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) * in the ioctl directly. Path status changes etc. */ list_for_each_entry(chain, &cp->ccwchain_list, next) { - ccw_head = (u32)(u64)chain->ch_ccw; + ccw_head = dma32_to_u32(virt_to_dma32(chain->ch_ccw)); /* * On successful execution, cpa points just beyond the end * of the chain. @@ -919,9 +925,10 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) * (cpa - ccw_head) is the offset value of the host * physical ccw to its chain head. * Adding this value to the guest physical ccw chain - * head gets us the guest cpa. + * head gets us the guest cpa: + * cpa = chain->ch_iova + (cpa - ccw_head) */ - cpa = chain->ch_iova + (cpa - ccw_head); + cpa = dma32_add(cpa, chain->ch_iova - ccw_head); break; } } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 43601816ea4e..6ff5c9cfb7ed 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -65,14 +65,14 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) * cancel/halt/clear completion. 
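The guest-cpa fixup in cp_update_scsw() above is a plain rebase from the host copy of the chain to its guest address. A sketch in u32 math, with illustrative values: for ccw_head = 0x80010000, ch_iova = 0x20000 and a reported cpa = 0x80010018, the guest sees 0x20018.

static u32 example_host_cpa_to_guest(u32 cpa, u32 ccw_head, u32 ch_iova)
{
	/* offset of the reported CCW within the host chain, rebased */
	return ch_iova + (cpa - ccw_head);
}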
*/ private->completion = &completion; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); if (ret == -EBUSY) wait_for_completion_timeout(&completion, 3*HZ); private->completion = NULL; flush_workqueue(vfio_ccw_work_q); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); ret = cio_disable_subchannel(sch); } while (ret == -EBUSY); @@ -112,7 +112,7 @@ void vfio_ccw_sch_io_todo(struct work_struct *work) private->state = VFIO_CCW_STATE_IDLE; if (private->io_trigger) - eventfd_signal(private->io_trigger, 1); + eventfd_signal(private->io_trigger); } void vfio_ccw_crw_todo(struct work_struct *work) @@ -122,7 +122,7 @@ void vfio_ccw_crw_todo(struct work_struct *work) private = container_of(work, struct vfio_ccw_private, crw_work); if (!list_empty(&private->crw) && private->crw_trigger) - eventfd_signal(private->crw_trigger, 1); + eventfd_signal(private->crw_trigger); } /* @@ -171,7 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) return -ENODEV; } - parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL); + parent = kzalloc(sizeof(*parent), GFP_KERNEL); if (!parent) return -ENOMEM; @@ -186,10 +186,10 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) parent->mdev_type.sysfs_name = "io"; parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)"; - parent->mdev_types[0] = &parent->mdev_type; + parent->mdev_types = &parent->mdev_type; ret = mdev_register_parent(&parent->parent, &sch->dev, &vfio_ccw_mdev_driver, - parent->mdev_types, 1); + &parent->mdev_types, 1); if (ret) goto out_unreg; @@ -249,7 +249,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) unsigned long flags; int rc = -EAGAIN; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; @@ -264,7 +264,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) } out_unlock: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return rc; } @@ -488,4 +488,5 @@ static void __exit vfio_ccw_sch_exit(void) module_init(vfio_ccw_sch_init); module_exit(vfio_ccw_sch_exit); +MODULE_DESCRIPTION("VFIO based Subchannel device driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 757b73141246..4d7988ea47ef 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -25,7 +25,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private) unsigned long flags; int ret; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); orb = cp_get_orb(&private->cp, sch); if (!orb) { @@ -72,7 +72,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private) ret = ccode; } out: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return ret; } @@ -83,7 +83,7 @@ static int fsm_do_halt(struct vfio_ccw_private *private) int ccode; int ret; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); VFIO_CCW_TRACE_EVENT(2, "haltIO"); VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); @@ -111,7 +111,7 @@ static int fsm_do_halt(struct vfio_ccw_private *private) default: ret = ccode; } - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return ret; } @@ -122,7 +122,7 @@ static int fsm_do_clear(struct vfio_ccw_private *private) int ccode; int ret; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); VFIO_CCW_TRACE_EVENT(2, "clearIO"); VFIO_CCW_TRACE_EVENT(2, 
dev_name(&sch->dev)); @@ -147,7 +147,7 @@ static int fsm_do_clear(struct vfio_ccw_private *private) default: ret = ccode; } - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); return ret; } @@ -376,18 +376,18 @@ static void fsm_open(struct vfio_ccw_private *private, struct subchannel *sch = to_subchannel(private->vdev.dev->parent); int ret; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch->isc = VFIO_CCW_ISC; - ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); if (ret) goto err_unlock; private->state = VFIO_CCW_STATE_IDLE; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return; err_unlock: - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); } @@ -397,7 +397,7 @@ static void fsm_close(struct vfio_ccw_private *private, struct subchannel *sch = to_subchannel(private->vdev.dev->parent); int ret; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); if (!sch->schib.pmcw.ena) goto err_unlock; @@ -409,12 +409,12 @@ static void fsm_close(struct vfio_ccw_private *private, goto err_unlock; private->state = VFIO_CCW_STATE_STANDBY; - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); cp_free(&private->cp); return; err_unlock: - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); } diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 5b53b94f13c7..a596f6013019 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -313,10 +313,12 @@ static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private, return 0; } -static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private, - struct vfio_region_info *info, - unsigned long arg) +static int vfio_ccw_mdev_ioctl_get_region_info(struct vfio_device *vdev, + struct vfio_region_info *info, + struct vfio_info_cap *caps) { + struct vfio_ccw_private *private = + container_of(vdev, struct vfio_ccw_private, vdev); int i; switch (info->index) { @@ -328,7 +330,6 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private, return 0; default: /* all other regions are handled via capability chain */ { - struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; struct vfio_region_info_cap_type cap_type = { .header.id = VFIO_REGION_INFO_CAP_TYPE, .header.version = 1 }; @@ -351,27 +352,10 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private, cap_type.type = private->region[i].type; cap_type.subtype = private->region[i].subtype; - ret = vfio_info_add_capability(&caps, &cap_type.header, + ret = vfio_info_add_capability(caps, &cap_type.header, sizeof(cap_type)); if (ret) return ret; - - info->flags |= VFIO_REGION_INFO_FLAG_CAPS; - if (info->argsz < sizeof(*info) + caps.size) { - info->argsz = sizeof(*info) + caps.size; - info->cap_offset = 0; - } else { - vfio_info_cap_shift(&caps, sizeof(*info)); - if (copy_to_user((void __user *)arg + sizeof(*info), - caps.buf, caps.size)) { - kfree(caps.buf); - return -EFAULT; - } - info->cap_offset = sizeof(*info); - } - - kfree(caps.buf); - } } return 0; @@ -421,7 +405,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private, case VFIO_IRQ_SET_DATA_NONE: { if (*ctx) - eventfd_signal(*ctx, 1); + eventfd_signal(*ctx); return 0; } case VFIO_IRQ_SET_DATA_BOOL: @@ -432,7 +416,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private, 
return -EFAULT; if (trigger && *ctx) - eventfd_signal(*ctx, 1); + eventfd_signal(*ctx); return 0; } case VFIO_IRQ_SET_DATA_EVENTFD: @@ -532,24 +516,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct vfio_device *vdev, return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } - case VFIO_DEVICE_GET_REGION_INFO: - { - struct vfio_region_info info; - - minsz = offsetofend(struct vfio_region_info, offset); - - if (copy_from_user(&info, (void __user *)arg, minsz)) - return -EFAULT; - - if (info.argsz < minsz) - return -EINVAL; - - ret = vfio_ccw_mdev_get_region_info(private, &info, arg); - if (ret) - return ret; - - return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; - } case VFIO_DEVICE_GET_IRQ_INFO: { struct vfio_irq_info info; @@ -612,7 +578,7 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count) "Relaying device request to user (#%u)\n", count); - eventfd_signal(private->req_trigger, 1); + eventfd_signal(private->req_trigger); } else if (count == 0) { dev_notice(dev, "No device request channel registered, blocked until released by user\n"); @@ -627,11 +593,13 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = { .read = vfio_ccw_mdev_read, .write = vfio_ccw_mdev_write, .ioctl = vfio_ccw_mdev_ioctl, + .get_region_info_caps = vfio_ccw_mdev_ioctl_get_region_info, .request = vfio_ccw_mdev_request, .dma_unmap = vfio_ccw_dma_unmap, .bind_iommufd = vfio_iommufd_emulated_bind, .unbind_iommufd = vfio_iommufd_emulated_unbind, .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, }; struct mdev_driver vfio_ccw_mdev_driver = { diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index b62bbc5c6376..0501d4bbcdbd 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -79,7 +79,7 @@ struct vfio_ccw_parent { struct mdev_parent parent; struct mdev_type mdev_type; - struct mdev_type *mdev_types[]; + struct mdev_type *mdev_types; }; /** diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 0edacd101c12..e83c6603c858 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -4,7 +4,7 @@ # ap-objs := ap_bus.o ap_card.o ap_queue.o -obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o +obj-$(CONFIG_AP) += ap.o # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o @@ -13,10 +13,26 @@ obj-$(CONFIG_ZCRYPT) += zcrypt.o # adapter drivers depend on ap.o and zcrypt.o obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o -# pkey kernel module -pkey-objs := pkey_api.o +# pkey base and api module +pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o obj-$(CONFIG_PKEY) += pkey.o +# pkey cca handler module +pkey-cca-objs := pkey_cca.o +obj-$(CONFIG_PKEY_CCA) += pkey-cca.o + +# pkey ep11 handler module +pkey-ep11-objs := pkey_ep11.o +obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o + +# pkey pckmo handler module +pkey-pckmo-objs := pkey_pckmo.o +obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o + +# pkey uv handler module +pkey-uv-objs := pkey_uv.o +obj-$(CONFIG_PKEY_UV) += pkey-uv.o + # adjunct processor matrix vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o obj-$(CONFIG_VFIO_AP) += vfio_ap.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 339812efe822..a445494fd2be 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -11,11 +11,11 @@ * Adjunct processor bus. 
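The eventfd_signal() changes in the vfio-ccw hunks above follow the eventfd core API change that dropped the counter argument; the function now always adds one to the eventfd. A sketch of the updated calling convention (example_notify() is a made-up name):

#include <linux/eventfd.h>

static void example_notify(struct eventfd_ctx *trigger)
{
	if (trigger)
		eventfd_signal(trigger);	/* formerly eventfd_signal(trigger, 1) */
}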
*/ -#define KMSG_COMPONENT "ap" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ap: " fmt #include <linux/kernel_stat.h> #include <linux/moduleparam.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/err.h> @@ -26,6 +26,7 @@ #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> +#include <asm/machine.h> #include <asm/airq.h> #include <asm/tpi.h> #include <linux/atomic.h> @@ -38,13 +39,17 @@ #include <linux/debugfs.h> #include <linux/ctype.h> #include <linux/module.h> +#include <asm/uv.h> +#include <asm/chsc.h> +#include <linux/mempool.h> #include "ap_bus.h" #include "ap_debug.h" -/* - * Module parameters; note though this file itself isn't modular. - */ +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Adjunct Processor Bus driver"); +MODULE_LICENSE("GPL"); + int ap_domain_index = -1; /* Adjunct Processor Domain Index */ static DEFINE_SPINLOCK(ap_domain_lock); module_param_named(domain, ap_domain_index, int, 0440); @@ -80,20 +85,27 @@ DEFINE_SPINLOCK(ap_queues_lock); /* Default permissions (ioctl, card and domain masking) */ struct ap_perms ap_perms; EXPORT_SYMBOL(ap_perms); -DEFINE_MUTEX(ap_perms_mutex); -EXPORT_SYMBOL(ap_perms_mutex); - -/* # of bus scans since init */ -static atomic64_t ap_scan_bus_count; +/* true if apmask and/or aqmask are NOT default */ +bool ap_apmask_aqmask_in_use; +/* counter for how many driver_overrides are currently active */ +int ap_driver_override_ctr; +/* + * Mutex for consistent read and write of the ap_perms struct, + * ap_apmask_aqmask_in_use, ap_driver_override_ctr + * and the ap bus sysfs attributes apmask and aqmask. + */ +DEFINE_MUTEX(ap_attr_mutex); +EXPORT_SYMBOL(ap_attr_mutex); /* # of bindings complete since init */ static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0); -/* completion for initial APQN bindings complete */ -static DECLARE_COMPLETION(ap_init_apqn_bindings_complete); +/* completion for APQN bindings complete */ +static DECLARE_COMPLETION(ap_apqn_bindings_complete); -static struct ap_config_info *ap_qci_info; -static struct ap_config_info *ap_qci_info_old; +static struct ap_config_info qci[2]; +static struct ap_config_info *const ap_qci_info = &qci[0]; +static struct ap_config_info *const ap_qci_info_old = &qci[1]; /* * AP bus related debug feature things. @@ -101,12 +113,38 @@ static struct ap_config_info *ap_qci_info_old; debug_info_t *ap_dbf_info; /* - * Workqueue timer for bus rescan. + * There is a need for a do-not-allocate-memory path through the AP bus + * layer. The pkey layer may be triggered via the in-kernel interface from + * a protected key crypto algorithm (namely PAES) to convert a secure key + * into a protected key. This happens in a workqueue context, so sleeping + * is allowed but memory allocations causing IO operations are not permitted. + * To accomplish this, an AP message memory pool with pre-allocated space + * is established. When ap_init_apmsg() with use_mempool set to true is + * called, instead of kmalloc() the ap message buffer is allocated from + * the ap_msg_pool. This pool only holds a limited amount of buffers: + * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and + * exactly one of these items (if available) is returned if ap_init_apmsg() + * with the use_mempool arg set to true is called. When this pool is exhausted + * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without + * any attempt to allocate memory and the caller has to deal with that. 
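A minimal sketch of the pre-allocated pool the comment above describes, using the generic mempool API this patch relies on (mempool_create_kmalloc_pool(), mempool_alloc_preallocated()); the sizes and example_* names are illustrative only:

#include <linux/mempool.h>

#define EXAMPLE_POOL_MIN_ITEMS	8		/* cf. ap_msg_pool_min_items */
#define EXAMPLE_BUF_SIZE	(12 * 1024)	/* cf. AP_DEFAULT_MAX_MSG_SIZE */

static mempool_t *example_pool;

static int example_pool_setup(void)
{
	example_pool = mempool_create_kmalloc_pool(EXAMPLE_POOL_MIN_ITEMS,
						   EXAMPLE_BUF_SIZE);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_buf_no_io(void)
{
	/* hands out a pre-allocated item only; NULL when exhausted */
	return mempool_alloc_preallocated(example_pool);
}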
+ */ +static mempool_t *ap_msg_pool; +static unsigned int ap_msg_pool_min_items = 8; +module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440); +MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items"); + +/* + * AP bus rescan related things. */ -static struct timer_list ap_config_timer; -static int ap_config_time = AP_CONFIG_TIME; -static void ap_scan_bus(struct work_struct *); -static DECLARE_WORK(ap_scan_work, ap_scan_bus); +static bool ap_scan_bus(void); +static bool ap_scan_bus_result; /* result of last ap_scan_bus() */ +static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */ +static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */ +static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */ +static int ap_scan_bus_time = AP_CONFIG_TIME; +static struct timer_list ap_scan_bus_timer; +static void ap_scan_bus_wq_callback(struct work_struct *); +static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback); /* * Tasklet & timer for AP request polling and interrupts @@ -135,7 +173,7 @@ static int ap_max_domain_id = 15; /* Maximum adapter id, if not given via qci */ static int ap_max_adapter_id = 63; -static struct bus_type ap_bus_type; +static const struct bus_type ap_bus_type; /* Adapter interrupt definitions */ static void ap_interrupt_handler(struct airq_struct *airq, @@ -201,9 +239,7 @@ static int ap_apft_available(void) */ static inline int ap_qact_available(void) { - if (ap_qci_info) - return ap_qci_info->qact; - return 0; + return ap_qci_info->qact; } /* @@ -213,9 +249,7 @@ static inline int ap_qact_available(void) */ int ap_sb_available(void) { - if (ap_qci_info) - return ap_qci_info->apsb; - return 0; + return ap_qci_info->apsb; } /* @@ -227,23 +261,6 @@ bool ap_is_se_guest(void) } EXPORT_SYMBOL(ap_is_se_guest); -/* - * ap_fetch_qci_info(): Fetch cryptographic config info - * - * Returns the ap configuration info fetched via PQAP(QCI). - * On success 0 is returned, on failure a negative errno - * is returned, e.g. if the PQAP(QCI) instruction is not - * available, the return value will be -EOPNOTSUPP. - */ -static inline int ap_fetch_qci_info(struct ap_config_info *info) -{ - if (!ap_qci_available()) - return -EOPNOTSUPP; - if (!info) - return -EINVAL; - return ap_qci(info); -} - /** * ap_init_qci_info(): Allocate and query qci config info. 
* Does also update the static variables ap_max_domain_id @@ -251,27 +268,12 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info) */ static void __init ap_init_qci_info(void) { - if (!ap_qci_available()) { + if (!ap_qci_available() || + ap_qci(ap_qci_info)) { AP_DBF_INFO("%s QCI not supported\n", __func__); return; } - - ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL); - if (!ap_qci_info) - return; - ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL); - if (!ap_qci_info_old) { - kfree(ap_qci_info); - ap_qci_info = NULL; - return; - } - if (ap_fetch_qci_info(ap_qci_info) != 0) { - kfree(ap_qci_info); - kfree(ap_qci_info_old); - ap_qci_info = NULL; - ap_qci_info_old = NULL; - return; - } + memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info)); AP_DBF_INFO("%s successfully fetched initial qci info\n", __func__); if (ap_qci_info->apxa) { @@ -286,8 +288,6 @@ static void __init ap_init_qci_info(void) __func__, ap_max_domain_id); } } - - memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info)); } /* @@ -310,7 +310,7 @@ static inline int ap_test_config_card_id(unsigned int id) { if (id > ap_max_adapter_id) return 0; - if (ap_qci_info) + if (ap_qci_info->flags) return ap_test_config(ap_qci_info->apm, id); return 1; } @@ -327,7 +327,7 @@ int ap_test_config_usage_domain(unsigned int domain) { if (domain > ap_max_domain_id) return 0; - if (ap_qci_info) + if (ap_qci_info->flags) return ap_test_config(ap_qci_info->aqm, domain); return 1; } @@ -352,18 +352,17 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain); /* * ap_queue_info(): Check and get AP queue info. * Returns: 1 if APQN exists and info is filled, - * 0 if APQN seems to exit but there is no info + * 0 if APQN seems to exist but there is no info * available (eg. caused by an asynch pending error) * -1 invalid APQN, TAPQ error or AP queue status which * indicates there is no APQN. */ -static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, - int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop) +static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo, + bool *decfg, bool *cstop) { struct ap_queue_status status; - struct ap_tapq_gr2 tapq_info; - tapq_info.value = 0; + hwinfo->value = 0; /* make sure we don't run into a specification exception */ if (AP_QID_CARD(qid) > ap_max_adapter_id || @@ -371,11 +370,7 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, return -1; /* call TAPQ on this APQN */ - status = ap_test_queue(qid, ap_apft_available(), &tapq_info); - - /* handle pending async error with return 'no info available' */ - if (status.async) - return 0; + status = ap_test_queue(qid, ap_apft_available(), hwinfo); switch (status.response_code) { case AP_RESPONSE_NORMAL: @@ -383,26 +378,23 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, case AP_RESPONSE_DECONFIGURED: case AP_RESPONSE_CHECKSTOPPED: case AP_RESPONSE_BUSY: - /* - * According to the architecture in all these cases the - * info should be filled. All bits 0 is not possible as - * there is at least one of the mode bits set. - */ - if (WARN_ON_ONCE(!tapq_info.value)) - return 0; - *q_type = tapq_info.at; - *q_fac = tapq_info.fac; - *q_depth = tapq_info.qd; - *q_ml = tapq_info.ml; - *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED; - *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED; - return 1; + /* For all these RCs the tapq info should be available */ break; default: - /* - * A response code which indicates, there is no info available.
- */ - return -1; + /* On a pending async error the info should be available */ + if (!status.async) + return -1; + break; } + + /* There should be at least one of the mode bits set */ + if (WARN_ON_ONCE(!hwinfo->value)) + return 0; + + *decfg = status.response_code == AP_RESPONSE_DECONFIGURED; + *cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED; + + return 1; } void ap_wait(enum ap_sm_wait wait) @@ -445,7 +437,7 @@ void ap_wait(enum ap_sm_wait wait) */ void ap_request_timeout(struct timer_list *t) { - struct ap_queue *aq = from_timer(aq, t, timeout); + struct ap_queue *aq = timer_container_of(aq, t, timeout); spin_lock_bh(&aq->lock); ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT)); @@ -493,7 +485,7 @@ static void ap_tasklet_fn(unsigned long dummy) * important that no requests on any AP get lost. */ if (ap_irq_flag) - xchg(ap_airq.lsi_ptr, 0); + WRITE_ONCE(*ap_airq.lsi_ptr, 0); spin_lock_bh(&ap_queues_lock); hash_for_each(ap_queues, bkt, aq, hnode) { @@ -586,6 +578,48 @@ static void ap_poll_thread_stop(void) #define is_card_dev(x) ((x)->parent == ap_root_device) #define is_queue_dev(x) ((x)->parent != ap_root_device) +/* + * ap_init_apmsg() - Initialize ap_message. + */ +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags) +{ + unsigned int maxmsgsize; + + memset(ap_msg, 0, sizeof(*ap_msg)); + ap_msg->flags = flags; + + if (flags & AP_MSG_FLAG_MEMPOOL) { + ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; + return 0; + } + + maxmsgsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = maxmsgsize; + + return 0; +} +EXPORT_SYMBOL(ap_init_apmsg); + +/* + * ap_release_apmsg() - Release ap_message. + */ +void ap_release_apmsg(struct ap_message *ap_msg) +{ + if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) { + memzero_explicit(ap_msg->msg, ap_msg->bufsize); + mempool_free(ap_msg->msg, ap_msg_pool); + } else { + kfree_sensitive(ap_msg->msg); + } +} +EXPORT_SYMBOL(ap_release_apmsg); + /** * ap_bus_match() * @dev: Pointer to device @@ -593,9 +627,9 @@ static void ap_poll_thread_stop(void) * * AP bus driver registration/unregistration. 
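A hypothetical caller of the two helpers above on the mempool path, as the PAES protected-key conversion case requires; everything except ap_init_apmsg(), ap_release_apmsg() and AP_MSG_FLAG_MEMPOOL is made up:

#include "ap_bus.h"

static int example_build_request(void)
{
	struct ap_message ap_msg;
	int rc;

	rc = ap_init_apmsg(&ap_msg, AP_MSG_FLAG_MEMPOOL);
	if (rc)
		return rc;	/* pool exhausted: -ENOMEM, caller must cope */

	/* ... build the request in ap_msg.msg, at most ap_msg.bufsize bytes ... */

	ap_release_apmsg(&ap_msg);	/* wipes and returns the pool item */
	return 0;
}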
*/ -static int ap_bus_match(struct device *dev, struct device_driver *drv) +static int ap_bus_match(struct device *dev, const struct device_driver *drv) { - struct ap_driver *ap_drv = to_ap_drv(drv); + const struct ap_driver *ap_drv = to_ap_drv(drv); struct ap_device_id *id; /* @@ -645,11 +679,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env) return rc; /* Add MODE=<accel|cca|ep11> */ - if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) + if (ac->hwinfo.accel) rc = add_uevent_var(env, "MODE=accel"); - else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) + else if (ac->hwinfo.cca) rc = add_uevent_var(env, "MODE=cca"); - else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) + else if (ac->hwinfo.ep11) rc = add_uevent_var(env, "MODE=ep11"); if (rc) return rc; @@ -657,11 +691,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env) struct ap_queue *aq = to_ap_queue(&ap_dev->device); /* Add MODE=<accel|cca|ep11> */ - if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) + if (aq->card->hwinfo.accel) rc = add_uevent_var(env, "MODE=accel"); - else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) + else if (aq->card->hwinfo.cca) rc = add_uevent_var(env, "MODE=cca"); - else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) + else if (aq->card->hwinfo.ep11) rc = add_uevent_var(env, "MODE=ep11"); if (rc) return rc; @@ -761,7 +795,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound) } /* - * After initial ap bus scan do check if all existing APQNs are + * After ap bus scan do check if all existing APQNs are * bound to device drivers. */ static void ap_check_bindings_complete(void) @@ -771,11 +805,11 @@ static void ap_check_bindings_complete(void) if (atomic64_read(&ap_scan_bus_count) >= 1) { ap_calc_bound_apqns(&apqns, &bound); if (bound == apqns) { - if (!completion_done(&ap_init_apqn_bindings_complete)) { - complete_all(&ap_init_apqn_bindings_complete); - AP_DBF_INFO("%s complete\n", __func__); + if (!completion_done(&ap_apqn_bindings_complete)) { + complete_all(&ap_apqn_bindings_complete); + ap_send_bindings_complete_uevent(); + pr_debug("all apqn bindings complete\n"); } - ap_send_bindings_complete_uevent(); } } } @@ -790,27 +824,29 @@ static void ap_check_bindings_complete(void) * -ETIME is returned. On failures negative return values are * returned to the caller. */ -int ap_wait_init_apqn_bindings_complete(unsigned long timeout) +int ap_wait_apqn_bindings_complete(unsigned long timeout) { + int rc = 0; long l; - if (completion_done(&ap_init_apqn_bindings_complete)) + if (completion_done(&ap_apqn_bindings_complete)) return 0; if (timeout) l = wait_for_completion_interruptible_timeout( - &ap_init_apqn_bindings_complete, timeout); + &ap_apqn_bindings_complete, timeout); else l = wait_for_completion_interruptible( - &ap_init_apqn_bindings_complete); + &ap_apqn_bindings_complete); if (l < 0) - return l == -ERESTARTSYS ? -EINTR : l; + rc = l == -ERESTARTSYS ? 
-EINTR : l; else if (l == 0 && timeout) - return -ETIME; + rc = -ETIME; - return 0; + pr_debug("rc=%d\n", rc); + return rc; } -EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete); +EXPORT_SYMBOL(ap_wait_apqn_bindings_complete); static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) { @@ -825,21 +861,38 @@ static int __ap_revise_reserved(struct device *dev, void *dummy) int rc, card, queue, devres, drvres; if (is_queue_dev(dev)) { - card = AP_QID_CARD(to_ap_queue(dev)->qid); - queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); - mutex_lock(&ap_perms_mutex); - devres = test_bit_inv(card, ap_perms.apm) && - test_bit_inv(queue, ap_perms.aqm); - mutex_unlock(&ap_perms_mutex); - drvres = to_ap_drv(dev->driver)->flags - & AP_DRIVER_FLAG_DEFAULT; - if (!!devres != !!drvres) { - AP_DBF_DBG("%s reprobing queue=%02x.%04x\n", - __func__, card, queue); - rc = device_reprobe(dev); - if (rc) - AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", - __func__, card, queue); + struct ap_driver *ap_drv = to_ap_drv(dev->driver); + struct ap_queue *aq = to_ap_queue(dev); + struct ap_device *ap_dev = &aq->ap_dev; + + card = AP_QID_CARD(aq->qid); + queue = AP_QID_QUEUE(aq->qid); + + if (ap_dev->driver_override) { + if (strcmp(ap_dev->driver_override, + ap_drv->driver.name)) { + pr_debug("reprobing queue=%02x.%04x\n", card, queue); + rc = device_reprobe(dev); + if (rc) { + AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", + __func__, card, queue); + } + } + } else { + mutex_lock(&ap_attr_mutex); + devres = test_bit_inv(card, ap_perms.apm) && + test_bit_inv(queue, ap_perms.aqm); + mutex_unlock(&ap_attr_mutex); + drvres = to_ap_drv(dev->driver)->flags + & AP_DRIVER_FLAG_DEFAULT; + if (!!devres != !!drvres) { + pr_debug("reprobing queue=%02x.%04x\n", card, queue); + rc = device_reprobe(dev); + if (rc) { + AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", + __func__, card, queue); + } + } } } @@ -857,22 +910,37 @@ static void ap_bus_revise_bindings(void) * @card: the APID of the adapter card to check * @queue: the APQI of the queue to check * - * Note: the ap_perms_mutex must be locked by the caller of this function. + * Note: the ap_attr_mutex must be locked by the caller of this function. * * Return: an int specifying whether the AP adapter is reserved for the host (1) * or not (0). */ int ap_owned_by_def_drv(int card, int queue) { + struct ap_queue *aq; int rc = 0; if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS) return -EINVAL; + aq = ap_get_qdev(AP_MKQID(card, queue)); + if (aq) { + const struct device_driver *drv = aq->ap_dev.device.driver; + const struct ap_driver *ap_drv = to_ap_drv(drv); + bool override = !!aq->ap_dev.driver_override; + + if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT) + rc = 1; + put_device(&aq->ap_dev.device); + if (override) + goto out; + } + if (test_bit_inv(card, ap_perms.apm) && test_bit_inv(queue, ap_perms.aqm)) rc = 1; +out: return rc; } EXPORT_SYMBOL(ap_owned_by_def_drv); @@ -884,7 +952,7 @@ EXPORT_SYMBOL(ap_owned_by_def_drv); * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check * - * Note: the ap_perms_mutex must be locked by the caller of this function. + * Note: the ap_attr_mutex must be locked by the caller of this function. 
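A hedged usage sketch for ap_wait_apqn_bindings_complete() above; the 30 second timeout and the example_* name are arbitrary illustration choices:

#include <linux/jiffies.h>
#include <linux/printk.h>

static int example_wait_for_bindings(void)
{
	int rc = ap_wait_apqn_bindings_complete(msecs_to_jiffies(30000));

	if (rc == -ETIME)
		pr_warn("APQN bindings still incomplete after 30s\n");
	return rc;
}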
* * Return: an int specifying whether each APQN is reserved for the host (1) or * not (0) @@ -895,12 +963,10 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, int card, queue, rc = 0; for (card = 0; !rc && card < AP_DEVICES; card++) - if (test_bit_inv(card, apm) && - test_bit_inv(card, ap_perms.apm)) + if (test_bit_inv(card, apm)) for (queue = 0; !rc && queue < AP_DOMAINS; queue++) - if (test_bit_inv(queue, aqm) && - test_bit_inv(queue, ap_perms.aqm)) - rc = 1; + if (test_bit_inv(queue, aqm)) + rc = ap_owned_by_def_drv(card, queue); return rc; } @@ -924,15 +990,27 @@ static int ap_device_probe(struct device *dev) */ card = AP_QID_CARD(to_ap_queue(dev)->qid); queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); - mutex_lock(&ap_perms_mutex); - devres = test_bit_inv(card, ap_perms.apm) && - test_bit_inv(queue, ap_perms.aqm); - mutex_unlock(&ap_perms_mutex); - drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; - if (!!devres != !!drvres) - goto out; + if (ap_dev->driver_override) { + if (strcmp(ap_dev->driver_override, + ap_drv->driver.name)) + goto out; + } else { + mutex_lock(&ap_attr_mutex); + devres = test_bit_inv(card, ap_perms.apm) && + test_bit_inv(queue, ap_perms.aqm); + mutex_unlock(&ap_attr_mutex); + drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; + if (!!devres != !!drvres) + goto out; + } } + /* + * Rearm the bindings complete completion to trigger + * bindings complete when all devices are bound again + */ + reinit_completion(&ap_apqn_bindings_complete); + /* Add queue/card to list of active queues/cards */ spin_lock_bh(&ap_queues_lock); if (is_queue_dev(dev)) @@ -947,13 +1025,20 @@ static int ap_device_probe(struct device *dev) if (is_queue_dev(dev)) hash_del(&to_ap_queue(dev)->hnode); spin_unlock_bh(&ap_queues_lock); - } else { - ap_check_bindings_complete(); } out: - if (rc) + if (rc) { put_device(dev); + } else { + if (is_queue_dev(dev)) { + pr_debug("queue=%02x.%04x new driver=%s\n", + card, queue, ap_drv->driver.name); + } else { + pr_debug("card=%02x new driver=%s\n", + to_ap_card(dev)->id, ap_drv->driver.name); + } + } return rc; } @@ -1006,11 +1091,16 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, char *name) { struct device_driver *drv = &ap_drv->driver; + int rc; drv->bus = &ap_bus_type; drv->owner = owner; drv->name = name; - return driver_register(drv); + rc = driver_register(drv); + + ap_check_bindings_complete(); + + return rc; } EXPORT_SYMBOL(ap_driver_register); @@ -1020,34 +1110,85 @@ void ap_driver_unregister(struct ap_driver *ap_drv) } EXPORT_SYMBOL(ap_driver_unregister); -void ap_bus_force_rescan(void) +/* + * Enforce a synchronous AP bus rescan. + * Returns true if the bus scan finds a change in the AP configuration + * and AP devices have been added or deleted when this function returns. + */ +bool ap_bus_force_rescan(void) { - /* processing a asynchronous bus rescan */ - del_timer(&ap_config_timer); - queue_work(system_long_wq, &ap_scan_work); - flush_work(&ap_scan_work); + unsigned long scan_counter = atomic64_read(&ap_scan_bus_count); + bool rc = false; + + pr_debug("> scan counter=%lu\n", scan_counter); + + /* Only trigger AP bus scans after the initial scan is done */ + if (scan_counter <= 0) + goto out; + + /* + * There is one unlikely but nevertheless valid scenario where the + * thread holding the mutex may try to send some crypto load but + * all cards are offline so a rescan is triggered which causes + * a recursive call of ap_bus_force_rescan(). 
A simple return if + the mutex is already locked by this thread solves this. + */ + if (mutex_is_locked(&ap_scan_bus_mutex)) { + if (ap_scan_bus_task == current) + goto out; + } + + /* Try to acquire the AP scan bus mutex */ + if (mutex_trylock(&ap_scan_bus_mutex)) { + /* mutex acquired, run the AP bus scan */ + ap_scan_bus_task = current; + ap_scan_bus_result = ap_scan_bus(); + rc = ap_scan_bus_result; + ap_scan_bus_task = NULL; + mutex_unlock(&ap_scan_bus_mutex); + goto out; + } + + /* + * Mutex acquire failed. So there is currently another task + * already running the AP bus scan. Then let's simply wait + * for the lock which means the other task has finished and + * stored the result in ap_scan_bus_result. + */ + if (mutex_lock_interruptible(&ap_scan_bus_mutex)) { + /* some error occurred, ignore and go out */ + goto out; + } + rc = ap_scan_bus_result; + mutex_unlock(&ap_scan_bus_mutex); + +out: + pr_debug("rc=%d\n", rc); + return rc; } EXPORT_SYMBOL(ap_bus_force_rescan); /* * A config change has happened, force an ap bus rescan. */ -void ap_bus_cfg_chg(void) +static int ap_bus_cfg_chg(struct notifier_block *nb, + unsigned long action, void *data) { - AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); + if (action != CHSC_NOTIFY_AP_CFG) + return NOTIFY_DONE; + + pr_debug("config change, forcing bus rescan\n"); ap_bus_force_rescan(); + + return NOTIFY_OK; } -/* - * hex2bitmap() - parse hex mask string and set bitmap. - * Valid strings are "0x012345678" with at least one valid hex number. - * Rest of the bitmap to the right is padded with 0. No spaces allowed - * within the string, the leading 0x may be omitted. - * Returns the bitmask with exactly the bits set as given by the hex - * string (both in big endian order). - */ -static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) +static struct notifier_block ap_bus_nb = { + .notifier_call = ap_bus_cfg_chg, +}; + +int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits) { int i, n, b; @@ -1074,6 +1215,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) return -EINVAL; return 0; } +EXPORT_SYMBOL(ap_hex2bitmap); /* * modify_bitmap() - parse bitmask argument and modify an existing @@ -1094,7 +1236,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) */ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) { - int a, i, z; + unsigned long a, i, z; char *np, sign; /* bits needs to be a multiple of 8 */ @@ -1139,7 +1281,7 @@ static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits, rc = modify_bitmap(str, newmap, bits); } else { memset(newmap, 0, size); - rc = hex2bitmap(str, newmap, bits); + rc = ap_hex2bitmap(str, newmap, bits); } return rc; } @@ -1205,7 +1347,7 @@ static BUS_ATTR_RW(ap_domain); static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf) { - if (!ap_qci_info) /* QCI not supported */ + if (!ap_qci_info->flags) /* QCI not supported */ return sysfs_emit(buf, "not supported\n"); return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", @@ -1219,7 +1361,7 @@ static BUS_ATTR_RO(ap_control_domain_mask); static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf) { - if (!ap_qci_info) /* QCI not supported */ + if (!ap_qci_info->flags) /* QCI not supported */ return sysfs_emit(buf, "not supported\n"); return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", @@ -1233,7 +1375,7 @@ static BUS_ATTR_RO(ap_usage_domain_mask); static ssize_t
ap_adapter_mask_show(const struct bus_type *bus, char *buf) { - if (!ap_qci_info) /* QCI not supported */ + if (!ap_qci_info->flags) /* QCI not supported */ return sysfs_emit(buf, "not supported\n"); return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", @@ -1254,7 +1396,7 @@ static BUS_ATTR_RO(ap_interrupts); static ssize_t config_time_show(const struct bus_type *bus, char *buf) { - return sysfs_emit(buf, "%d\n", ap_config_time); + return sysfs_emit(buf, "%d\n", ap_scan_bus_time); } static ssize_t config_time_store(const struct bus_type *bus, @@ -1264,8 +1406,8 @@ static ssize_t config_time_store(const struct bus_type *bus, if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) return -EINVAL; - ap_config_time = time; - mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); + ap_scan_bus_time = time; + mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ); return count; } @@ -1349,12 +1491,12 @@ static ssize_t apmask_show(const struct bus_type *bus, char *buf) { int rc; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n", ap_perms.apm[0], ap_perms.apm[1], ap_perms.apm[2], ap_perms.apm[3]); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return rc; } @@ -1364,6 +1506,7 @@ static int __verify_card_reservations(struct device_driver *drv, void *data) int rc = 0; struct ap_driver *ap_drv = to_ap_drv(drv); unsigned long *newapm = (unsigned long *)data; + unsigned long aqm_any[BITS_TO_LONGS(AP_DOMAINS)]; /* * increase the driver's module refcounter to be sure it is not @@ -1373,7 +1516,8 @@ static int __verify_card_reservations(struct device_driver *drv, void *data) return 0; if (ap_drv->in_use) { - rc = ap_drv->in_use(newapm, ap_perms.aqm); + bitmap_fill(aqm_any, AP_DOMAINS); + rc = ap_drv->in_use(newapm, aqm_any); if (rc) rc = -EBUSY; } @@ -1402,18 +1546,31 @@ static int apmask_commit(unsigned long *newapm) memcpy(ap_perms.apm, newapm, APMASKSIZE); + /* + * Update ap_apmask_aqmask_in_use. Note that the + * ap_attr_mutex has to be obtained here. + */ + ap_apmask_aqmask_in_use = + bitmap_full(ap_perms.apm, AP_DEVICES) && + bitmap_full(ap_perms.aqm, AP_DOMAINS) ? 
+ false : true; + return 0; } static ssize_t apmask_store(const struct bus_type *bus, const char *buf, size_t count) { - int rc, changes = 0; DECLARE_BITMAP(newapm, AP_DEVICES); + int rc = -EINVAL, changes = 0; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; + /* Do not allow apmask/aqmask if driver override is active */ + if (ap_driver_override_ctr) + goto done; + rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm); if (rc) goto done; @@ -1423,7 +1580,7 @@ static ssize_t apmask_store(const struct bus_type *bus, const char *buf, rc = apmask_commit(newapm); done: - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); if (rc) return rc; @@ -1441,12 +1598,12 @@ static ssize_t aqmask_show(const struct bus_type *bus, char *buf) { int rc; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n", ap_perms.aqm[0], ap_perms.aqm[1], ap_perms.aqm[2], ap_perms.aqm[3]); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return rc; } @@ -1456,6 +1613,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data) int rc = 0; struct ap_driver *ap_drv = to_ap_drv(drv); unsigned long *newaqm = (unsigned long *)data; + unsigned long apm_any[BITS_TO_LONGS(AP_DEVICES)]; /* * increase the driver's module refcounter to be sure it is not @@ -1465,7 +1623,8 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data) return 0; if (ap_drv->in_use) { - rc = ap_drv->in_use(ap_perms.apm, newaqm); + bitmap_fill(apm_any, AP_DEVICES); + rc = ap_drv->in_use(apm_any, newaqm); if (rc) rc = -EBUSY; } @@ -1494,18 +1653,31 @@ static int aqmask_commit(unsigned long *newaqm) memcpy(ap_perms.aqm, newaqm, AQMASKSIZE); + /* + * Update ap_apmask_aqmask_in_use. Note that the + * ap_attr_mutex has to be obtained here. + */ + ap_apmask_aqmask_in_use = + bitmap_full(ap_perms.apm, AP_DEVICES) && + bitmap_full(ap_perms.aqm, AP_DOMAINS) ? 
+ false : true; + return 0; } static ssize_t aqmask_store(const struct bus_type *bus, const char *buf, size_t count) { - int rc, changes = 0; DECLARE_BITMAP(newaqm, AP_DOMAINS); + int rc = -EINVAL, changes = 0; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; + /* Do not allow apmask/aqmask if driver override is active */ + if (ap_driver_override_ctr) + goto done; + rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm); if (rc) goto done; @@ -1515,7 +1687,7 @@ static ssize_t aqmask_store(const struct bus_type *bus, const char *buf, rc = aqmask_commit(newaqm); done: - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); if (rc) return rc; @@ -1562,11 +1734,20 @@ static ssize_t bindings_show(const struct bus_type *bus, char *buf) static BUS_ATTR_RO(bindings); +static ssize_t bindings_complete_count_show(const struct bus_type *bus, + char *buf) +{ + return sysfs_emit(buf, "%llu\n", + atomic64_read(&ap_bindings_complete_count)); +} + +static BUS_ATTR_RO(bindings_complete_count); + static ssize_t features_show(const struct bus_type *bus, char *buf) { int n = 0; - if (!ap_qci_info) /* QCI not supported */ + if (!ap_qci_info->flags) /* QCI not supported */ return sysfs_emit(buf, "-\n"); if (ap_qci_info->apsc) @@ -1602,12 +1783,13 @@ static struct attribute *ap_bus_attrs[] = { &bus_attr_aqmask.attr, &bus_attr_scans.attr, &bus_attr_bindings.attr, + &bus_attr_bindings_complete_count.attr, &bus_attr_features.attr, NULL, }; ATTRIBUTE_GROUPS(ap_bus); -static struct bus_type ap_bus_type = { +static const struct bus_type ap_bus_type = { .name = "ap", .bus_groups = ap_bus_groups, .match = &ap_bus_match, @@ -1798,12 +1980,12 @@ static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac) */ static inline void ap_scan_domains(struct ap_card *ac) { - int rc, dom, depth, type, ml; + struct ap_tapq_hwinfo hwinfo; bool decfg, chkstop; struct ap_queue *aq; struct device *dev; - unsigned int func; ap_qid_t qid; + int rc, dom; /* * Go through the configuration for the domains and compare them @@ -1826,8 +2008,7 @@ static inline void ap_scan_domains(struct ap_card *ac) goto put_dev_and_continue; } /* domain is valid, get info from this APQN */ - rc = ap_queue_info(qid, &type, &func, &depth, - &ml, &decfg, &chkstop); + rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop); switch (rc) { case -1: if (dev) { @@ -1843,15 +2024,15 @@ static inline void ap_scan_domains(struct ap_card *ac) } /* if no queue device exists, create a new one */ if (!aq) { - aq = ap_queue_create(qid, ac->ap_dev.device_type); + aq = ap_queue_create(qid, ac); if (!aq) { AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n", __func__, ac->id, dom); continue; } - aq->card = ac; aq->config = !decfg; aq->chkstop = chkstop; + aq->se_bstate = hwinfo.bs; dev = &aq->ap_dev.device; dev->bus = &ap_bus_type; dev->parent = &ac->ap_dev.device; @@ -1865,19 +2046,24 @@ static inline void ap_scan_domains(struct ap_card *ac) } /* get it and thus adjust reference counter */ get_device(dev); - if (decfg) + if (decfg) { AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n", __func__, ac->id, dom); - else if (chkstop) + } else if (chkstop) { AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n", __func__, ac->id, dom); - else + } else { + /* nudge the queue's state machine */ + ap_queue_init_state(aq); AP_DBF_INFO("%s(%d,%d) new queue dev created\n", __func__, ac->id, dom); + } goto put_dev_and_continue; } /* handle state changes on already existing queue device */ 
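Since ap_hex2bitmap() is exported by a hunk further up, a hypothetical in-kernel caller could parse an apmask-style string directly. Per the (removed) hex2bitmap() comment, bits are set as given by the hex string in big endian order, so "0xc0" marks the two leftmost, i.e. inverted-numbered, bits:

#include <linux/bitmap.h>
#include "ap_bus.h"

static int example_parse_mask(const char *str)
{
	DECLARE_BITMAP(apm, AP_DEVICES);
	int rc;

	rc = ap_hex2bitmap(str, apm, AP_DEVICES);	/* e.g. str = "0xc0" */
	if (rc)
		return rc;
	/* now test_bit_inv(0, apm) and test_bit_inv(1, apm) are set */
	return 0;
}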
spin_lock_bh(&aq->lock); + /* SE bind state */ + aq->se_bstate = hwinfo.bs; /* checkstop state */ if (chkstop && !aq->chkstop) { /* checkstop on */ @@ -1887,21 +2073,19 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED; } spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev checkstop on\n", + ac->id, dom); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); goto put_dev_and_continue; } else if (!chkstop && aq->chkstop) { /* checkstop off */ aq->chkstop = false; - if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { - aq->dev_state = AP_DEV_STATE_OPERATING; - aq->sm_state = AP_SM_STATE_RESET_START; - } + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev checkstop off\n", + ac->id, dom); goto put_dev_and_continue; } /* config state change */ @@ -1913,8 +2097,8 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_DECONFIGURED; } spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev config off\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev config off\n", + ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); @@ -1922,13 +2106,11 @@ static inline void ap_scan_domains(struct ap_card *ac) } else if (!decfg && !aq->config) { /* config on this queue device */ aq->config = true; - if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { - aq->dev_state = AP_DEV_STATE_OPERATING; - aq->sm_state = AP_SM_STATE_RESET_START; - } + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev config on\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev config on\n", + ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); goto put_dev_and_continue; } @@ -1955,11 +2137,11 @@ put_dev_and_continue: */ static inline void ap_scan_adapter(int ap) { - int rc, dom, depth, type, comp_type, ml; + struct ap_tapq_hwinfo hwinfo; + int rc, dom, comp_type; bool decfg, chkstop; struct ap_card *ac; struct device *dev; - unsigned int func; ap_qid_t qid; /* Is there currently a card device for this adapter ? 
 */
@@ -1989,8 +2171,7 @@ static inline void ap_scan_adapter(int ap)
	for (dom = 0; dom <= ap_max_domain_id; dom++)
		if (ap_test_config_usage_domain(dom)) {
			qid = AP_MKQID(ap, dom);
-			if (ap_queue_info(qid, &type, &func, &depth,
-					  &ml, &decfg, &chkstop) > 0)
+			if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
				break;
		}
	if (dom > ap_max_domain_id) {
@@ -2001,12 +2182,12 @@ static inline void ap_scan_adapter(int ap)
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		} else {
-			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
-				   __func__, ap);
+			pr_debug("(%d) no type info (no APQN found), ignored\n",
+				 ap);
		}
		return;
	}
-	if (!type) {
+	if (!hwinfo.at) {
		/* No adapter type info available, an unusable adapter */
		if (ac) {
			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
@@ -2014,23 +2195,22 @@ static inline void ap_scan_adapter(int ap)
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		} else {
-			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
-				   __func__, ap);
+			pr_debug("(%d) no valid type (0) info, ignored\n", ap);
		}
		return;
	}
+	hwinfo.value &= TAPQ_CARD_HWINFO_MASK; /* filter card specific hwinfo */
	if (ac) {
		/* Check APQN against existing card device for changes */
-		if (ac->raw_hwtype != type) {
+		if (ac->hwinfo.at != hwinfo.at) {
			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
-				    __func__, ap, type);
+				    __func__, ap, hwinfo.at);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
-		} else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
-			   (func & TAPQ_CARD_FUNC_CMP_MASK)) {
+		} else if (ac->hwinfo.fac != hwinfo.fac) {
			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
-				    __func__, ap, func);
+				    __func__, ap, hwinfo.fac);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
@@ -2064,13 +2244,13 @@ static inline void ap_scan_adapter(int ap)

	if (!ac) {
		/* Build a new card device */
-		comp_type = ap_get_compatible_type(qid, type, func);
+		comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
		if (!comp_type) {
			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
-				    __func__, ap, type);
+				    __func__, ap, hwinfo.at);
			return;
		}
-		ac = ap_card_create(ap, depth, type, comp_type, func, ml);
+		ac = ap_card_create(ap, hwinfo, comp_type);
		if (!ac) {
			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
				    __func__, ap);
@@ -2101,13 +2281,13 @@ static inline void ap_scan_adapter(int ap)
		get_device(dev);
		if (decfg)
			AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
-				    __func__, ap, type, func);
+				    __func__, ap, hwinfo.at, hwinfo.fac);
		else if (chkstop)
			AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
-				    __func__, ap, type, func);
+				    __func__, ap, hwinfo.at, hwinfo.fac);
		else
			AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
-				    __func__, ap, type, func);
+				    __func__, ap, hwinfo.at, hwinfo.fac);
	}

	/* Verify the domains and the queue devices for this card */
@@ -2129,33 +2309,90 @@ static inline void ap_scan_adapter(int ap)
 */
 static bool ap_get_configuration(void)
 {
-	if (!ap_qci_info)	/* QCI not supported */
+	if (!ap_qci_info->flags) /* QCI not supported */
		return false;
	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
-	ap_fetch_qci_info(ap_qci_info);
+	ap_qci(ap_qci_info);

	return memcmp(ap_qci_info, ap_qci_info_old,
		      sizeof(struct ap_config_info)) != 0;
 }
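[Editor's note: the two helpers added just below both reduce to the same bit trick: a resource is "new" exactly when its bit is set in the current QCI snapshot but clear in the old one, i.e. cur & ~old is non-zero. Here is a minimal stand-alone user-space sketch of that idea; it deliberately avoids the kernel bitmap API, and the function name, array sizes, and mask values are made up for illustration.]

#include <stdio.h>

/* Report whether any bit is set in cur[] that is clear in old[].
 * This mirrors what bitmap_andnot() + !bitmap_empty() compute below;
 * receding resources (old=1, cur=0) are deliberately ignored. */
static int has_new_bits(const unsigned long *cur, const unsigned long *old,
			size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		if (cur[i] & ~old[i])
			return 1;
	return 0;
}

int main(void)
{
	unsigned long old[2] = { 0x5UL, 0x0UL }; /* bits 0 and 2 set */
	unsigned long cur[2] = { 0x4UL, 0x8UL }; /* bit 0 receded, one new bit in word 1 */

	printf("new adapters: %s\n", has_new_bits(cur, old, 2) ? "yes" : "no");
	return 0;
}

[The example prints "yes" solely because of the new bit in the second word; the bit that disappeared does not count, matching the "receding adapters are not counted" semantics documented below.]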
+/*
+ * ap_config_has_new_aps - Check current against old qci info if
+ * new adapters have appeared. Returns true if at least one new
+ * adapter in the apm mask is showing up. Existing adapters or
+ * receding adapters are not counted.
+ */
+static bool ap_config_has_new_aps(void)
+{
+	unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+	if (!ap_qci_info->flags)
+		return false;
+
+	bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+		      (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+	if (!bitmap_empty(m, AP_DEVICES))
+		return true;
+
+	return false;
+}
+
+/*
+ * ap_config_has_new_doms - Check current against old qci info if
+ * new (usage) domains have appeared. Returns true if at least one
+ * new domain in the aqm mask is showing up. Existing domains or
+ * receding domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+	unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+	if (!ap_qci_info->flags)
+		return false;
+
+	bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+		      (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+	if (!bitmap_empty(m, AP_DOMAINS))
+		return true;
+
+	return false;
+}
+
 /**
  * ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
- * @unused: Unused pointer.
+ * Always runs under ap_scan_bus_mutex protection, which
+ * needs to be locked/unlocked by the caller!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
  */
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_scan_bus(void)
 {
-	int ap, config_changed = 0;
+	bool config_changed;
+	int ap;
+
+	pr_debug(">\n");

-	/* config change notify */
+	/* (re-)fetch configuration via QCI */
	config_changed = ap_get_configuration();
-	if (config_changed)
+	if (config_changed) {
+		if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+			/*
+			 * The appearance of new adapters and/or domains
+			 * requires new ap devices to be built, which then
+			 * need to get bound to a device driver. Thus reset
+			 * the APQN bindings complete completion.
+			 */
+			reinit_completion(&ap_apqn_bindings_complete);
+		}
+		/* post a config change notify */
		notify_config_changed();
+	}
	ap_select_domain();

-	AP_DBF_DBG("%s running\n", __func__);
-
	/* loop over all possible adapters */
	for (ap = 0; ap <= ap_max_adapter_id; ap++)
		ap_scan_adapter(ap);
@@ -2178,23 +2415,132 @@ static void ap_scan_bus(struct work_struct *unused)
	}

	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
-		AP_DBF_DBG("%s init scan complete\n", __func__);
+		pr_debug("init scan complete\n");
		ap_send_init_scan_done_uevent();
-		ap_check_bindings_complete();
	}

-	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+	ap_check_bindings_complete();
+
+	mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+	pr_debug("< config_changed=%d\n", config_changed);
+
+	return config_changed;
+}
+
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
+{
+	/*
+	 * Schedule work into the system long wq which, when finally
+	 * executed, invokes the AP bus scan.
+	 */
+	queue_work(system_long_wq, &ap_scan_bus_work);
+}
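[Editor's note: the workqueue callback that follows relies on a trylock to make concurrent scan triggers cheap: whoever gets the mutex does the scan, everyone else returns immediately. A minimal user-space sketch of the same pattern, using plain pthreads rather than the kernel mutex API; names and the "simulated scanner" in main() are made up for illustration. On Linux a default (non-recursive) pthread mutex returns EBUSY from trylock here.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;

/* One scan at a time: if the lock is already held, somebody else is
 * scanning right now and this invocation simply returns. */
static void scan_work(void)
{
	if (pthread_mutex_trylock(&scan_mutex) != 0) {
		printf("scan already running, skipping\n");
		return;
	}
	printf("running the scan\n");
	pthread_mutex_unlock(&scan_mutex);
}

int main(void)
{
	scan_work();				/* acquires the lock, runs the scan */
	pthread_mutex_lock(&scan_mutex);	/* simulate a concurrent scanner */
	scan_work();				/* trylock fails, invocation skips */
	pthread_mutex_unlock(&scan_mutex);
	return 0;
}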
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+	/*
+	 * Try to invoke ap_scan_bus(). If the mutex acquisition
+	 * fails, another task is already running the AP bus scan,
+	 * so there is no need to wait and re-trigger the scan.
+	 * Please note that at the end of the scan bus function the
+	 * AP scan bus timer is re-armed; it triggers
+	 * ap_scan_bus_timer_callback(), which queues a work item on
+	 * the system_long_wq, which in turn invokes this function
+	 * here again.
+	 */
+	if (mutex_trylock(&ap_scan_bus_mutex)) {
+		ap_scan_bus_task = current;
+		ap_scan_bus_result = ap_scan_bus();
+		ap_scan_bus_task = NULL;
+		mutex_unlock(&ap_scan_bus_mutex);
+	}
+}
+
+static inline void __exit ap_async_exit(void)
+{
+	if (ap_thread_flag)
+		ap_poll_thread_stop();
+	chsc_notifier_unregister(&ap_bus_nb);
+	cancel_work(&ap_scan_bus_work);
+	hrtimer_cancel(&ap_poll_timer);
+	timer_delete(&ap_scan_bus_timer);
+}
+
+static inline int __init ap_async_init(void)
+{
+	int rc;
+
+	/* Setup the AP bus rescan timer. */
+	timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
+
+	/*
+	 * Setup the high resolution poll timer.
+	 * If we are running under z/VM adjust polling to z/VM polling rate.
+	 */
+	if (machine_is_vm())
+		poll_high_timeout = 1500000;
+	hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+	queue_work(system_long_wq, &ap_scan_bus_work);
+
+	rc = chsc_notifier_register(&ap_bus_nb);
+	if (rc)
+		goto out;
+
+	/* Start the low priority AP bus poll thread. */
+	if (!ap_thread_flag)
+		return 0;
+
+	rc = ap_poll_thread_start();
+	if (rc)
+		goto out_notifier;
+
+	return 0;
+
+out_notifier:
+	chsc_notifier_unregister(&ap_bus_nb);
+out:
+	cancel_work(&ap_scan_bus_work);
+	hrtimer_cancel(&ap_poll_timer);
+	timer_delete(&ap_scan_bus_timer);
+	return rc;
+}
+
+static inline void ap_irq_exit(void)
+{
+	if (ap_irq_flag)
+		unregister_adapter_interrupt(&ap_airq);
 }

-static void ap_config_timeout(struct timer_list *unused)
+static inline int __init ap_irq_init(void)
 {
-	queue_work(system_long_wq, &ap_scan_work);
+	int rc;
+
+	if (!ap_interrupts_available() || !ap_useirq)
+		return 0;
+
+	rc = register_adapter_interrupt(&ap_airq);
+	ap_irq_flag = (rc == 0);
+
+	return rc;
 }

-static int __init ap_debug_init(void)
+static inline void ap_debug_exit(void)
+{
+	debug_unregister(ap_dbf_info);
+}
+
+static inline int __init ap_debug_init(void)
 {
	ap_dbf_info = debug_register("ap", 2, 1,
-				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
+				     AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(ap_dbf_info, &debug_sprintf_view);
	debug_set_level(ap_dbf_info, DBF_ERR);

@@ -2212,14 +2558,14 @@ static void __init ap_perms_init(void)
	if (apm_str) {
		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
-				  &ap_perms_mutex);
+				  &ap_attr_mutex);
	}

	/* aqm kernel parameter string */
	if (aqm_str) {
		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
-				  &ap_perms_mutex);
+				  &ap_attr_mutex);
	}
 }

@@ -2232,18 +2578,26 @@ static int __init ap_module_init(void)
 {
	int rc;

-	rc = ap_debug_init();
-	if (rc)
-		return rc;
-
	if (!ap_instructions_available()) {
		pr_warn("The hardware system does not support AP instructions\n");
		return -ENODEV;
	}

+	rc = ap_debug_init();
+	if (rc)
+		return rc;
+
	/* init ap_queue hashtable */
	hash_init(ap_queues);

+	/* create ap msg buffer memory pool */
+	ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items,
+						  AP_DEFAULT_MAX_MSG_SIZE);
+	if (!ap_msg_pool) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
	/* set up the AP permissions (ioctls, ap and aq masks) */
	ap_perms_init();

@@ -2259,12 +2613,6 @@ static int __init ap_module_init(void)
		ap_domain_index = -1;
	}

-	/* enable interrupts if available */
-	if (ap_interrupts_available() &&
ap_useirq) { - rc = register_adapter_interrupt(&ap_airq); - ap_irq_flag = (rc == 0); - } - /* Create /sys/bus/ap. */ rc = bus_register(&ap_bus_type); if (rc) @@ -2277,38 +2625,39 @@ static int __init ap_module_init(void) goto out_bus; ap_root_device->bus = &ap_bus_type; - /* Setup the AP bus rescan timer. */ - timer_setup(&ap_config_timer, ap_config_timeout, 0); - - /* - * Setup the high resolution poll timer. - * If we are running under z/VM adjust polling to z/VM polling rate. - */ - if (MACHINE_IS_VM) - poll_high_timeout = 1500000; - hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - ap_poll_timer.function = ap_poll_timeout; - - /* Start the low priority AP bus poll thread. */ - if (ap_thread_flag) { - rc = ap_poll_thread_start(); - if (rc) - goto out_work; - } + /* enable interrupts if available */ + rc = ap_irq_init(); + if (rc) + goto out_device; - queue_work(system_long_wq, &ap_scan_work); + /* Setup asynchronous work (timers, workqueue, etc). */ + rc = ap_async_init(); + if (rc) + goto out_irq; return 0; -out_work: - hrtimer_cancel(&ap_poll_timer); +out_irq: + ap_irq_exit(); +out_device: root_device_unregister(ap_root_device); out_bus: bus_unregister(&ap_bus_type); out: - if (ap_irq_flag) - unregister_adapter_interrupt(&ap_airq); - kfree(ap_qci_info); + mempool_destroy(ap_msg_pool); + ap_debug_exit(); return rc; } -device_initcall(ap_module_init); + +static void __exit ap_module_exit(void) +{ + ap_async_exit(); + ap_irq_exit(); + root_device_unregister(ap_root_device); + bus_unregister(&ap_bus_type); + mempool_destroy(ap_msg_pool); + ap_debug_exit(); +} + +module_init(ap_module_init); +module_exit(ap_module_exit); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index be54b070c031..51e08f27bd75 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -76,16 +76,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_DEVICE_TYPE_CEX8 14 /* - * Known function facilities - */ -#define AP_FUNC_MEX4K 1 -#define AP_FUNC_CRT4K 2 -#define AP_FUNC_COPRO 3 -#define AP_FUNC_ACCEL 4 -#define AP_FUNC_EP11 5 -#define AP_FUNC_APXA 6 - -/* * AP queue state machine states */ enum ap_sm_state { @@ -168,7 +158,7 @@ struct ap_driver { struct ap_config_info *old_config_info); }; -#define to_ap_drv(x) container_of((x), struct ap_driver, driver) +#define to_ap_drv(x) container_of_const((x), struct ap_driver, driver) int ap_driver_register(struct ap_driver *, struct module *, char *); void ap_driver_unregister(struct ap_driver *); @@ -176,15 +166,14 @@ void ap_driver_unregister(struct ap_driver *); struct ap_device { struct device device; int device_type; /* AP device type. */ + const char *driver_override; }; #define to_ap_dev(x) container_of((x), struct ap_device, device) struct ap_card { struct ap_device ap_dev; - int raw_hwtype; /* AP raw hardware type. */ - unsigned int functions; /* TAPQ GR2 upper 32 facility bits */ - int queue_depth; /* AP queue depth.*/ + struct ap_tapq_hwinfo hwinfo; /* TAPQ GR2 content */ int id; /* AP card number. 
*/ unsigned int maxmsgsize; /* AP msg limit for this card */ bool config; /* configured state */ @@ -192,7 +181,7 @@ struct ap_card { atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; -#define TAPQ_CARD_FUNC_CMP_MASK 0xFFFF0000 +#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL #define ASSOC_IDX_INVALID 0x10000 #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) @@ -206,7 +195,7 @@ struct ap_queue { bool config; /* configured state */ bool chkstop; /* checkstop state */ ap_qid_t qid; /* AP queue id. */ - bool interrupt; /* indicate if interrupts are enabled */ + unsigned int se_bstate; /* SE bind state (BS) */ unsigned int assoc_idx; /* SE association index */ int queue_count; /* # messages currently on AP queue. */ int pendingq_count; /* # requests on pendingq list. */ @@ -226,6 +215,11 @@ struct ap_queue { typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); +struct ap_response_type { + struct completion work; + int type; +}; + struct ap_message { struct list_head list; /* Request queueing. */ unsigned long psmid; /* Message id. */ @@ -234,7 +228,7 @@ struct ap_message { size_t bufsize; /* allocated msg buffer size */ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ int rc; /* Return code for this message */ - void *private; /* ap driver private pointer. */ + struct ap_response_type response; /* receive is called from tasklet context */ void (*receive)(struct ap_queue *, struct ap_message *, struct ap_message *); @@ -243,27 +237,10 @@ struct ap_message { #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ +#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */ -/** - * ap_init_message() - Initialize ap_message. - * Initialize a message before using. Otherwise this might result in - * unexpected behaviour. - */ -static inline void ap_init_message(struct ap_message *ap_msg) -{ - memset(ap_msg, 0, sizeof(*ap_msg)); -} - -/** - * ap_release_message() - Release ap_message. - * Releases all memory used internal within the ap_message struct - * Currently this is the message and private field. 
- */ -static inline void ap_release_message(struct ap_message *ap_msg) -{ - kfree_sensitive(ap_msg->msg); - kfree_sensitive(ap_msg->private); -} +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags); +void ap_release_apmsg(struct ap_message *ap_msg); enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); @@ -271,25 +248,27 @@ enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); void ap_flush_queue(struct ap_queue *aq); +bool ap_queue_usable(struct ap_queue *aq); void *ap_airq_ptr(void); int ap_sb_available(void); bool ap_is_se_guest(void); void ap_wait(enum ap_sm_wait wait); void ap_request_timeout(struct timer_list *t); -void ap_bus_force_rescan(void); +bool ap_bus_force_rescan(void); int ap_test_config_usage_domain(unsigned int domain); int ap_test_config_ctrl_domain(unsigned int domain); void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); -struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); +struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac); void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_init_state(struct ap_queue *aq); +void _ap_queue_init_state(struct ap_queue *aq); -struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, - int comp_type, unsigned int functions, int ml); +struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo info, + int comp_type); #define APMASKSIZE (BITS_TO_LONGS(AP_DEVICES) * sizeof(unsigned long)) #define AQMASKSIZE (BITS_TO_LONGS(AP_DOMAINS) * sizeof(unsigned long)) @@ -302,7 +281,9 @@ struct ap_perms { }; extern struct ap_perms ap_perms; -extern struct mutex ap_perms_mutex; +extern bool ap_apmask_aqmask_in_use; +extern int ap_driver_override_ctr; +extern struct mutex ap_attr_mutex; /* * Get ap_queue device for this qid. @@ -354,6 +335,28 @@ int ap_parse_mask_str(const char *str, struct mutex *lock); /* + * ap_hex2bitmap() - Convert a string containing a hexadecimal number (str) + * into a bitmap (bitmap) with bits set that correspond to the bits represented + * by the hex string. Input and output data is in big endian order. + * + * str - Input hex string of format "0x1234abcd". The leading "0x" is optional. + * At least one digit is required. Must be large enough to hold the number of + * bits represented by the bits parameter. + * + * bitmap - Pointer to a bitmap. Upon successful completion of this function, + * this bitmap will have bits set to match the value of str. If bitmap is longer + * than str, then the rightmost bits of bitmap are padded with zeros. Must be + * large enough to hold the number of bits represented by the bits parameter. + * + * bits - Length, in bits, of the bitmap represented by str. Must be a multiple + * of 8. + * + * Returns: 0 On success + * -EINVAL If str format is invalid or bits is not a multiple of 8. + */ +int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits); + +/* * Interface to wait for the AP bus to have done one initial ap bus * scan and all detected APQNs have been bound to device drivers. * If these both conditions are not fulfilled, this function blocks @@ -362,8 +365,12 @@ int ap_parse_mask_str(const char *str, * the return value is 0. If the timeout (in jiffies) hits instead * -ETIME is returned. 
On failures negative return values are * returned to the caller. + * It may be that the AP bus scan finds new devices. Then the + * condition that all APQNs are bound to their device drivers + * is reset to false and this call again blocks until either all + * APQNs are bound to a device driver or the timeout hits again. */ -int ap_wait_init_apqn_bindings_complete(unsigned long timeout); +int ap_wait_apqn_bindings_complete(unsigned long timeout); void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg); void ap_send_online_uevent(struct ap_device *ap_dev, int online); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index b2bd477659a7..8102c8134c49 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -6,8 +6,7 @@ * Adjunct processor bus, card related code. */ -#define KMSG_COMPONENT "ap" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ap: " fmt #include <linux/init.h> #include <linux/slab.h> @@ -34,7 +33,7 @@ static ssize_t raw_hwtype_show(struct device *dev, { struct ap_card *ac = to_ap_card(dev); - return sysfs_emit(buf, "%d\n", ac->raw_hwtype); + return sysfs_emit(buf, "%d\n", ac->hwinfo.at); } static DEVICE_ATTR_RO(raw_hwtype); @@ -44,7 +43,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr, { struct ap_card *ac = to_ap_card(dev); - return sysfs_emit(buf, "%d\n", ac->queue_depth); + return sysfs_emit(buf, "%d\n", ac->hwinfo.qd); } static DEVICE_ATTR_RO(depth); @@ -54,7 +53,7 @@ static ssize_t ap_functions_show(struct device *dev, { struct ap_card *ac = to_ap_card(dev); - return sysfs_emit(buf, "0x%08X\n", ac->functions); + return sysfs_emit(buf, "0x%08X\n", ac->hwinfo.fac); } static DEVICE_ATTR_RO(ap_functions); @@ -229,8 +228,8 @@ static void ap_card_device_release(struct device *dev) kfree(ac); } -struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, - int comp_type, unsigned int functions, int ml) +struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo hwinfo, + int comp_type) { struct ap_card *ac; @@ -240,12 +239,10 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, ac->ap_dev.device.release = ap_card_device_release; ac->ap_dev.device.type = &ap_card_type; ac->ap_dev.device_type = comp_type; - ac->raw_hwtype = raw_type; - ac->queue_depth = queue_depth; - ac->functions = functions; + ac->hwinfo = hwinfo; ac->id = id; - ac->maxmsgsize = ml > 0 ? - ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE; + ac->maxmsgsize = hwinfo.ml > 0 ? + hwinfo.ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE; return ac; } diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h index c083ce88a9a6..2f66271b8564 100644 --- a/drivers/s390/crypto/ap_debug.h +++ b/drivers/s390/crypto/ap_debug.h @@ -16,7 +16,7 @@ #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) -#define DBF_MAX_SPRINTF_ARGS 6 +#define AP_DBF_MAX_SPRINTF_ARGS 6 #define AP_DBF(...) \ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) @@ -26,8 +26,6 @@ debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__) #define AP_DBF_INFO(...) \ debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__) -#define AP_DBF_DBG(...) 
\ - debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *ap_dbf_info; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 1336e632adc4..4a32c1e19a1e 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -6,31 +6,46 @@ * Adjunct processor bus, queue related code. */ -#define KMSG_COMPONENT "ap" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ap: " fmt +#include <linux/export.h> #include <linux/init.h> #include <linux/slab.h> #include <asm/facility.h> +#define CREATE_TRACE_POINTS +#include <asm/trace/ap.h> + #include "ap_bus.h" #include "ap_debug.h" +EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap); +EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap); + static void __ap_flush_queue(struct ap_queue *aq); /* * some AP queue helper functions */ +static inline bool ap_q_supported_in_se(struct ap_queue *aq) +{ + return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel; +} + static inline bool ap_q_supports_bind(struct ap_queue *aq) { - return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) || - ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL); + return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel; } static inline bool ap_q_supports_assoc(struct ap_queue *aq) { - return ap_test_bit(&aq->card->functions, AP_FUNC_EP11); + return aq->card->hwinfo.ep11; +} + +static inline bool ap_q_needs_bind(struct ap_queue *aq) +{ + return ap_q_supports_bind(aq) && ap_sb_available(); } /** @@ -88,9 +103,17 @@ static inline struct ap_queue_status __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen, int special) { + struct ap_queue_status status; + if (special) qid |= 0x400000UL; - return ap_nqap(qid, psmid, msg, msglen); + + status = ap_nqap(qid, psmid, msg, msglen); + + trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid), + status.value, psmid); + + return status; } /* State machine definitions and helpers */ @@ -130,8 +153,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) parts++; } while (status.response_code == 0xFF && resgr0 != 0); + trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), + status.value, aq->reply->psmid); + switch (status.response_code) { case AP_RESPONSE_NORMAL: + print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + aq->reply->msg, aq->reply->len, false); aq->queue_count = max_t(int, 0, aq->queue_count - 1); if (!status.queue_empty && !aq->queue_count) aq->queue_count++; @@ -165,6 +193,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) aq->queue_count = 0; list_splice_init(&aq->pendingq, &aq->requestq); aq->requestq_count += aq->pendingq_count; + pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), + aq->pendingq_count, aq->requestq_count); aq->pendingq_count = 0; break; default: @@ -195,13 +226,13 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq) return AP_SM_WAIT_AGAIN; } aq->sm_state = AP_SM_STATE_IDLE; - return AP_SM_WAIT_NONE; + break; case AP_RESPONSE_NO_PENDING_REPLY: if (aq->queue_count > 0) - return aq->interrupt ? + return status.irq_enabled ? 
AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT; aq->sm_state = AP_SM_STATE_IDLE; - return AP_SM_WAIT_NONE; + break; default: aq->dev_state = AP_DEV_STATE_ERROR; aq->last_err_rc = status.response_code; @@ -210,6 +241,16 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq) AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } + /* Check and maybe enable irq support (again) on this queue */ + if (!status.irq_enabled && status.queue_empty) { + void *lsi_ptr = ap_airq_ptr(); + + if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) { + aq->sm_state = AP_SM_STATE_SETIRQ_WAIT; + return AP_SM_WAIT_AGAIN; + } + } + return AP_SM_WAIT_NONE; } /** @@ -229,6 +270,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) /* Start the next request on the queue. */ ap_msg = list_entry(aq->requestq.next, struct ap_message, list); + print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg->msg, ap_msg->len, false); status = __ap_send(qid, ap_msg->psmid, ap_msg->msg, ap_msg->len, ap_msg->flags & AP_MSG_FLAG_SPECIAL); @@ -242,14 +285,14 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) list_move_tail(&ap_msg->list, &aq->pendingq); aq->requestq_count--; aq->pendingq_count++; - if (aq->queue_count < aq->card->queue_depth) { + if (aq->queue_count < aq->card->hwinfo.qd) { aq->sm_state = AP_SM_STATE_WORKING; return AP_SM_WAIT_AGAIN; } fallthrough; case AP_RESPONSE_Q_FULL: aq->sm_state = AP_SM_STATE_QUEUE_FULL; - return aq->interrupt ? + return status.irq_enabled ? AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT; case AP_RESPONSE_RESET_IN_PROGRESS: aq->sm_state = AP_SM_STATE_RESET_WAIT; @@ -302,7 +345,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq) case AP_RESPONSE_NORMAL: case AP_RESPONSE_RESET_IN_PROGRESS: aq->sm_state = AP_SM_STATE_RESET_WAIT; - aq->interrupt = false; aq->rapq_fbit = 0; return AP_SM_WAIT_LOW_TIMEOUT; default: @@ -324,17 +366,15 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq) static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq) { struct ap_queue_status status; + struct ap_tapq_hwinfo hwinfo; void *lsi_ptr; - if (aq->queue_count > 0 && aq->reply) - /* Try to read a completed message and get the status */ - status = ap_sm_recv(aq); - else - /* Get the status with TAPQ */ - status = ap_tapq(aq->qid, NULL); + /* Get the status with TAPQ */ + status = ap_test_queue(aq->qid, 1, &hwinfo); switch (status.response_code) { case AP_RESPONSE_NORMAL: + aq->se_bstate = hwinfo.bs; lsi_ptr = ap_airq_ptr(); if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) aq->sm_state = AP_SM_STATE_SETIRQ_WAIT; @@ -377,7 +417,6 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq) if (status.irq_enabled == 1) { /* Irqs are now enabled */ - aq->interrupt = true; aq->sm_state = (aq->queue_count > 0) ? 
AP_SM_STATE_WORKING : AP_SM_STATE_IDLE; } @@ -407,9 +446,9 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq) static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq) { struct ap_queue_status status; - struct ap_tapq_gr2 info; + struct ap_tapq_hwinfo hwinfo; - status = ap_test_queue(aq->qid, 1, &info); + status = ap_test_queue(aq->qid, 1, &hwinfo); /* handle asynchronous error on this queue */ if (status.async && status.response_code) { aq->dev_state = AP_DEV_STATE_ERROR; @@ -428,14 +467,17 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq) return AP_SM_WAIT_NONE; } + /* update queue's SE bind state */ + aq->se_bstate = hwinfo.bs; + /* check bs bits */ - switch (info.bs) { + switch (hwinfo.bs) { case AP_BS_Q_USABLE: /* association is through */ aq->sm_state = AP_SM_STATE_IDLE; - AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n", - __func__, AP_QID_CARD(aq->qid), - AP_QID_QUEUE(aq->qid), aq->assoc_idx); + pr_debug("queue 0x%02x.%04x associated with %u\n", + AP_QID_CARD(aq->qid), + AP_QID_QUEUE(aq->qid), aq->assoc_idx); return AP_SM_WAIT_NONE; case AP_BS_Q_USABLE_NO_SECURE_KEY: /* association still pending */ @@ -446,7 +488,7 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq) aq->dev_state = AP_DEV_STATE_ERROR; aq->last_err_rc = status.response_code; AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", - __func__, info.bs, + __func__, hwinfo.bs, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } @@ -620,16 +662,21 @@ static ssize_t interrupt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ap_queue *aq = to_ap_queue(dev); + struct ap_queue_status status; int rc = 0; spin_lock_bh(&aq->lock); - if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) + if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) { rc = sysfs_emit(buf, "Enable Interrupt pending.\n"); - else if (aq->interrupt) - rc = sysfs_emit(buf, "Interrupts enabled.\n"); - else - rc = sysfs_emit(buf, "Interrupts disabled.\n"); + } else { + status = ap_tapq(aq->qid, NULL); + if (status.irq_enabled) + rc = sysfs_emit(buf, "Interrupts enabled.\n"); + else + rc = sysfs_emit(buf, "Interrupts disabled.\n"); + } spin_unlock_bh(&aq->lock); + return rc; } @@ -668,22 +715,74 @@ static ssize_t ap_functions_show(struct device *dev, { struct ap_queue *aq = to_ap_queue(dev); struct ap_queue_status status; - struct ap_tapq_gr2 info; + struct ap_tapq_hwinfo hwinfo; - status = ap_test_queue(aq->qid, 1, &info); + status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } - return sysfs_emit(buf, "0x%08X\n", info.fac); + return sysfs_emit(buf, "0x%08X\n", hwinfo.fac); } static DEVICE_ATTR_RO(ap_functions); -#ifdef CONFIG_ZCRYPT_DEBUG +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + struct ap_device *ap_dev = &aq->ap_dev; + int rc; + + device_lock(dev); + if (ap_dev->driver_override) + rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override); + else + rc = sysfs_emit(buf, "\n"); + device_unlock(dev); + + return rc; +} + +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_queue *aq = 
to_ap_queue(dev); + struct ap_device *ap_dev = &aq->ap_dev; + int rc = -EINVAL; + bool old_value; + + if (mutex_lock_interruptible(&ap_attr_mutex)) + return -ERESTARTSYS; + + /* Do not allow driver override if apmask/aqmask is in use */ + if (ap_apmask_aqmask_in_use) + goto out; + + old_value = ap_dev->driver_override ? true : false; + rc = driver_set_override(dev, &ap_dev->driver_override, buf, count); + if (rc) + goto out; + if (old_value && !ap_dev->driver_override) + --ap_driver_override_ctr; + else if (!old_value && ap_dev->driver_override) + ++ap_driver_override_ctr; + + rc = count; + +out: + mutex_unlock(&ap_attr_mutex); + return rc; +} + +static DEVICE_ATTR_RW(driver_override); + +#ifdef CONFIG_AP_DEBUG static ssize_t states_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -795,7 +894,8 @@ static struct attribute *ap_queue_dev_attrs[] = { &dev_attr_config.attr, &dev_attr_chkstop.attr, &dev_attr_ap_functions.attr, -#ifdef CONFIG_ZCRYPT_DEBUG + &dev_attr_driver_override.attr, +#ifdef CONFIG_AP_DEBUG &dev_attr_states.attr, &dev_attr_last_err_rc.attr, #endif @@ -821,19 +921,25 @@ static ssize_t se_bind_show(struct device *dev, { struct ap_queue *aq = to_ap_queue(dev); struct ap_queue_status status; - struct ap_tapq_gr2 info; + struct ap_tapq_hwinfo hwinfo; if (!ap_q_supports_bind(aq)) return sysfs_emit(buf, "-\n"); - status = ap_test_queue(aq->qid, 1, &info); + status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } - switch (info.bs) { + + /* update queue's SE bind state */ + spin_lock_bh(&aq->lock); + aq->se_bstate = hwinfo.bs; + spin_unlock_bh(&aq->lock); + + switch (hwinfo.bs) { case AP_BS_Q_USABLE: case AP_BS_Q_USABLE_NO_SECURE_KEY: return sysfs_emit(buf, "bound\n"); @@ -848,6 +954,7 @@ static ssize_t se_bind_store(struct device *dev, { struct ap_queue *aq = to_ap_queue(dev); struct ap_queue_status status; + struct ap_tapq_hwinfo hwinfo; bool value; int rc; @@ -859,34 +966,80 @@ static ssize_t se_bind_store(struct device *dev, if (rc) return rc; - if (value) { - /* bind, do BAPQ */ - spin_lock_bh(&aq->lock); - if (aq->sm_state < AP_SM_STATE_IDLE) { - spin_unlock_bh(&aq->lock); - return -EBUSY; - } - status = ap_bapq(aq->qid); - spin_unlock_bh(&aq->lock); - if (status.response_code) { - AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), - AP_QID_QUEUE(aq->qid)); - return -EIO; - } - } else { - /* unbind, set F bit arg and trigger RAPQ */ + if (!value) { + /* Unbind. Set F bit arg and trigger RAPQ */ spin_lock_bh(&aq->lock); __ap_flush_queue(aq); aq->rapq_fbit = 1; - aq->assoc_idx = ASSOC_IDX_INVALID; - aq->sm_state = AP_SM_STATE_RESET_START; - ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); - spin_unlock_bh(&aq->lock); + _ap_queue_init_state(aq); + rc = count; + goto out; } - return count; + /* Bind. 
Check current SE bind state */ + status = ap_test_queue(aq->qid, 1, &hwinfo); + if (status.response_code) { + AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + return -EIO; + } + + /* Update BS state */ + spin_lock_bh(&aq->lock); + aq->se_bstate = hwinfo.bs; + if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) { + AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n", + __func__, hwinfo.bs, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = -EINVAL; + goto out; + } + + /* Check SM state */ + if (aq->sm_state < AP_SM_STATE_IDLE) { + rc = -EBUSY; + goto out; + } + + /* invoke BAPQ */ + status = ap_bapq(aq->qid); + if (status.response_code) { + AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = -EIO; + goto out; + } + aq->assoc_idx = ASSOC_IDX_INVALID; + + /* verify SE bind state */ + status = ap_test_queue(aq->qid, 1, &hwinfo); + if (status.response_code) { + AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = -EIO; + goto out; + } + aq->se_bstate = hwinfo.bs; + if (!(hwinfo.bs == AP_BS_Q_USABLE || + hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) { + AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n", + __func__, hwinfo.bs, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = -EIO; + goto out; + } + + /* SE bind was successful */ + AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = count; + +out: + spin_unlock_bh(&aq->lock); + return rc; } static DEVICE_ATTR_RW(se_bind); @@ -896,20 +1049,25 @@ static ssize_t se_associate_show(struct device *dev, { struct ap_queue *aq = to_ap_queue(dev); struct ap_queue_status status; - struct ap_tapq_gr2 info; + struct ap_tapq_hwinfo hwinfo; if (!ap_q_supports_assoc(aq)) return sysfs_emit(buf, "-\n"); - status = ap_test_queue(aq->qid, 1, &info); + status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } - switch (info.bs) { + /* update queue's SE bind state */ + spin_lock_bh(&aq->lock); + aq->se_bstate = hwinfo.bs; + spin_unlock_bh(&aq->lock); + + switch (hwinfo.bs) { case AP_BS_Q_USABLE: if (aq->assoc_idx == ASSOC_IDX_INVALID) { AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__); @@ -931,6 +1089,7 @@ static ssize_t se_associate_store(struct device *dev, { struct ap_queue *aq = to_ap_queue(dev); struct ap_queue_status status; + struct ap_tapq_hwinfo hwinfo; unsigned int value; int rc; @@ -944,18 +1103,28 @@ static ssize_t se_associate_store(struct device *dev, if (value >= ASSOC_IDX_INVALID) return -EINVAL; + /* check current SE bind state */ + status = ap_test_queue(aq->qid, 1, &hwinfo); + if (status.response_code) { + AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + return -EIO; + } spin_lock_bh(&aq->lock); - - /* sm should be in idle state */ - if (aq->sm_state != AP_SM_STATE_IDLE) { - spin_unlock_bh(&aq->lock); - return -EBUSY; + aq->se_bstate = hwinfo.bs; + if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) { + 
AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n", + __func__, hwinfo.bs, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + rc = -EINVAL; + goto out; } - /* already associated or association pending ? */ - if (aq->assoc_idx != ASSOC_IDX_INVALID) { - spin_unlock_bh(&aq->lock); - return -EINVAL; + /* check SM state */ + if (aq->sm_state != AP_SM_STATE_IDLE) { + rc = -EBUSY; + goto out; } /* trigger the asynchronous association request */ @@ -966,17 +1135,20 @@ static ssize_t se_associate_store(struct device *dev, aq->sm_state = AP_SM_STATE_ASSOC_WAIT; aq->assoc_idx = value; ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); - spin_unlock_bh(&aq->lock); break; default: - spin_unlock_bh(&aq->lock); AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n", __func__, status.response_code, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); - return -EIO; + rc = -EIO; + goto out; } - return count; + rc = count; + +out: + spin_unlock_bh(&aq->lock); + return rc; } static DEVICE_ATTR_RW(se_associate); @@ -1007,21 +1179,21 @@ static void ap_queue_device_release(struct device *dev) kfree(aq); } -struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) +struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac) { struct ap_queue *aq; aq = kzalloc(sizeof(*aq), GFP_KERNEL); if (!aq) return NULL; + aq->card = ac; aq->ap_dev.device.release = ap_queue_device_release; aq->ap_dev.device.type = &ap_queue_type; - aq->ap_dev.device_type = device_type; - // add optional SE secure binding attributes group - if (ap_sb_available() && is_prot_virt_guest()) + aq->ap_dev.device_type = ac->ap_dev.device_type; + /* in SE environment add bind/associate attributes group */ + if (ap_is_se_guest() && ap_q_supported_in_se(aq)) aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups; aq->qid = qid; - aq->interrupt = false; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->pendingq); INIT_LIST_HEAD(&aq->requestq); @@ -1074,6 +1246,50 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) EXPORT_SYMBOL(ap_queue_message); /** + * ap_queue_usable(): Check if queue is usable just now. + * @aq: The AP queue device to test for usability. + * This function is intended for the scheduler to query if it makes + * sense to enqueue a message into this AP queue device by calling + * ap_queue_message(). The perspective is very short-term as the + * state machine and device state(s) may change at any time. + */ +bool ap_queue_usable(struct ap_queue *aq) +{ + bool rc = true; + + spin_lock_bh(&aq->lock); + + /* check for not configured or checkstopped */ + if (!aq->config || aq->chkstop) { + rc = false; + goto unlock_and_out; + } + + /* device state needs to be ok */ + if (aq->dev_state != AP_DEV_STATE_OPERATING) { + rc = false; + goto unlock_and_out; + } + + /* SE guest's queues additionally need to be bound */ + if (ap_is_se_guest()) { + if (!ap_q_supported_in_se(aq)) { + rc = false; + goto unlock_and_out; + } + if (ap_q_needs_bind(aq) && + !(aq->se_bstate == AP_BS_Q_USABLE || + aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY)) + rc = false; + } + +unlock_and_out: + spin_unlock_bh(&aq->lock); + return rc; +} +EXPORT_SYMBOL(ap_queue_usable); + +/** * ap_cancel_message(): Cancel a crypto request. 
* @aq: The AP device that has the message queued * @ap_msg: The message that is to be removed @@ -1143,7 +1359,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq) /* move queue device state to SHUTDOWN in progress */ aq->dev_state = AP_DEV_STATE_SHUTDOWN; spin_unlock_bh(&aq->lock); - del_timer_sync(&aq->timeout); + timer_delete_sync(&aq->timeout); } void ap_queue_remove(struct ap_queue *aq) @@ -1160,14 +1376,19 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_init_state(struct ap_queue *aq) +void _ap_queue_init_state(struct ap_queue *aq) { - spin_lock_bh(&aq->lock); aq->dev_state = AP_DEV_STATE_OPERATING; aq->sm_state = AP_SM_STATE_RESET_START; aq->last_err_rc = 0; aq->assoc_idx = ASSOC_IDX_INVALID; ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); +} + +void ap_queue_init_state(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); } EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 6cfb6b2340c9..ad1cd699f53b 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -7,1333 +7,703 @@ * Author(s): Harald Freudenberger */ -#define KMSG_COMPONENT "pkey" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "pkey: " fmt -#include <linux/fs.h> #include <linux/init.h> #include <linux/miscdevice.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> -#include <linux/kallsyms.h> -#include <linux/debugfs.h> -#include <linux/random.h> -#include <linux/cpufeature.h> -#include <asm/zcrypt.h> -#include <asm/cpacf.h> -#include <asm/pkey.h> -#include <crypto/aes.h> #include "zcrypt_api.h" #include "zcrypt_ccamisc.h" -#include "zcrypt_ep11misc.h" -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("s390 protected key interface"); - -#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ -#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) -#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ -#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ -#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ +#include "pkey_base.h" /* - * debug feature data and functions + * Helper functions */ - -static debug_info_t *debug_info; - -#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__) -#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__) -#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__) -#define DEBUG_ERR(...) 
debug_sprintf_event(debug_info, 3, ##__VA_ARGS__) - -static void __init pkey_debug_init(void) -{ - /* 5 arguments per dbf entry (including the format string ptr) */ - debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); - debug_register_view(debug_info, &debug_sprintf_view); - debug_set_level(debug_info, 3); -} - -static void __exit pkey_debug_exit(void) +static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { - debug_unregister(debug_info); -} + int rc; -/* inside view of a protected key token (only type 0x00 version 0x01) */ -struct protaeskeytoken { - u8 type; /* 0x00 for PAES specific key tokens */ - u8 res0[3]; - u8 version; /* should be 0x01 for protected AES key token */ - u8 res1[3]; - u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ - u32 len; /* bytes actually stored in protkey[] */ - u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ -} __packed; - -/* inside view of a clear key token (type 0x00 version 0x02) */ -struct clearkeytoken { - u8 type; /* 0x00 for PAES specific key tokens */ - u8 res0[3]; - u8 version; /* 0x02 for clear key token */ - u8 res1[3]; - u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ - u32 len; /* bytes actually stored in clearkey[] */ - u8 clearkey[]; /* clear key value */ -} __packed; - -/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ -static inline u32 pkey_keytype_aes_to_size(u32 keytype) -{ - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - return 16; - case PKEY_KEYTYPE_AES_192: - return 24; - case PKEY_KEYTYPE_AES_256: - return 32; - default: - return 0; + /* try the direct way */ + rc = pkey_handler_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype, xflags); + + /* if this did not work, try the slowpath way */ + if (rc == -ENODEV) { + rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype, xflags); + if (rc) + rc = -ENODEV; } + + pr_debug("rc=%d\n", rc); + return rc; } /* - * Create a protected key from a clear key value via PCKMO instruction. 
+ * In-Kernel function: Transform a key blob (of any type) into a protected key */ -static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +int pkey_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { - /* mask of available pckmo subfunctions */ - static cpacf_mask_t pckmo_functions; - - u8 paramblock[112]; - u32 pkeytype; - int keysize; - long fc; - - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ - keysize = 16; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_128_KEY; - break; - case PKEY_KEYTYPE_AES_192: - /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ - keysize = 24; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_192_KEY; - break; - case PKEY_KEYTYPE_AES_256: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_256_KEY; - break; - case PKEY_KEYTYPE_ECC_P256: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P256_KEY; - break; - case PKEY_KEYTYPE_ECC_P384: - /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ - keysize = 48; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P384_KEY; - break; - case PKEY_KEYTYPE_ECC_P521: - /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ - keysize = 80; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P521_KEY; - break; - case PKEY_KEYTYPE_ECC_ED25519: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; - break; - case PKEY_KEYTYPE_ECC_ED448: - /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ - keysize = 64; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; - break; - default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, keytype); - return -EINVAL; - } - - if (*protkeylen < keysize + AES_WK_VP_SIZE) { - DEBUG_ERR("%s prot key buffer size too small: %u < %d\n", - __func__, *protkeylen, keysize + AES_WK_VP_SIZE); - return -EINVAL; - } + int rc; - /* Did we already check for PCKMO ? */ - if (!pckmo_functions.bytes[0]) { - /* no, so check now */ - if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) - return -ENODEV; - } - /* check for the pckmo subfunction we need now */ - if (!cpacf_test_func(&pckmo_functions, fc)) { - DEBUG_ERR("%s pckmo functions not available\n", __func__); - return -ENODEV; + rc = key2protkey(NULL, 0, key, keylen, + protkey, protkeylen, protkeytype, xflags); + if (rc == -ENODEV) { + pkey_handler_request_modules(); + rc = key2protkey(NULL, 0, key, keylen, + protkey, protkeylen, protkeytype, xflags); } - /* prepare param block */ - memset(paramblock, 0, sizeof(paramblock)); - memcpy(paramblock, clrkey, keysize); - - /* call the pckmo instruction */ - cpacf_pckmo(fc, paramblock); - - /* copy created protected key to key buffer including the wkvp block */ - *protkeylen = keysize + AES_WK_VP_SIZE; - memcpy(protkey, paramblock, *protkeylen); - *protkeytype = pkeytype; - - return 0; + return rc; } +EXPORT_SYMBOL(pkey_key2protkey); /* - * Find card and transform secure key into protected key. 
+ * Ioctl functions */ -static int pkey_skey2pkey(const u8 *key, u8 *protkey, - u32 *protkeylen, u32 *protkeytype) + +static void *_copy_key_from_user(void __user *ukey, size_t keylen) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - u16 cardnr, domain; - int rc, verify; - - zcrypt_wait_api_operational(); - - /* - * The cca_xxx2protkey call may fail when a card has been - * addressed where the master key was changed after last fetch - * of the mkvp into the cache. Try 3 times: First without verify - * then with verify and last round with verify and old master - * key verification pattern match not ignored. - */ - for (verify = 0; verify < 3; verify++) { - rc = cca_findcard(key, &cardnr, &domain, verify); - if (rc < 0) - continue; - if (rc > 0 && verify < 2) - continue; - switch (hdr->version) { - case TOKVER_CCA_AES: - rc = cca_sec2protkey(cardnr, domain, key, - protkey, protkeylen, protkeytype); - break; - case TOKVER_CCA_VLSC: - rc = cca_cipher2protkey(cardnr, domain, key, - protkey, protkeylen, - protkeytype); - break; - default: - return -EINVAL; - } - if (rc == 0) - break; - } + if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) + return ERR_PTR(-EINVAL); - if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); + return memdup_user(ukey, keylen); +} - return rc; +static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) +{ + if (!uapqns || nr_apqns == 0) + return NULL; + + return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn)); } -/* - * Construct EP11 key with given clear key value. - */ -static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, - u8 *keybuf, size_t *keybuflen) +static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) { - u32 nr_apqns, *apqns = NULL; - u16 card, dom; - int i, rc; - - zcrypt_wait_api_operational(); - - /* build a list of apqns suitable for ep11 keys with cpacf support */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, - NULL); - if (rc) - goto out; - - /* go through the list of apqns and try to bild an ep11 key */ - for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_clr2keyblob(card, dom, clrkeylen * 8, - 0, clrkey, keybuf, keybuflen, - PKEY_TYPE_EP11); - if (rc == 0) - break; - } + struct pkey_genseck kgs; + struct pkey_apqn apqn; + u32 keybuflen; + int rc; + + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + + apqn.card = kgs.cardnr; + apqn.domain = kgs.domain; + keybuflen = sizeof(kgs.seckey.seckey); + rc = pkey_handler_gen_key(&apqn, 1, + kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, + kgs.seckey.seckey, &keybuflen, NULL, 0); + pr_debug("gen_key()=%d\n", rc); + if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) + rc = -EFAULT; + memzero_explicit(&kgs, sizeof(kgs)); -out: - kfree(apqns); - if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); return rc; } -/* - * Find card and transform EP11 secure key into protected key. - */ -static int pkey_ep11key2pkey(const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) { - u32 nr_apqns, *apqns = NULL; - u16 card, dom; - int i, rc; - - zcrypt_wait_api_operational(); - - /* build a list of apqns suitable for this key */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; - - /* go through the list of apqns and try to derive an pkey */ - for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_kblob2protkey(card, dom, key, keylen, - protkey, protkeylen, protkeytype); - if (rc == 0) - break; - } + struct pkey_clr2seck kcs; + struct pkey_apqn apqn; + u32 keybuflen; + int rc; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + + apqn.card = kcs.cardnr; + apqn.domain = kcs.domain; + keybuflen = sizeof(kcs.seckey.seckey); + rc = pkey_handler_clr_to_key(&apqn, 1, + kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, + kcs.clrkey.clrkey, + pkey_keytype_aes_to_size(kcs.keytype), + kcs.seckey.seckey, &keybuflen, NULL, 0); + pr_debug("clr_to_key()=%d\n", rc); + if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) + rc = -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); -out: - kfree(apqns); - if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); return rc; } -/* - * Verify key and give back some info about the key. - */ -static int pkey_verifykey(const struct pkey_seckey *seckey, - u16 *pcardnr, u16 *pdomain, - u16 *pkeysize, u32 *pattributes) +static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) { - struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; - u16 cardnr, domain; + struct pkey_sec2protk ksp; + struct pkey_apqn apqn; int rc; - /* check the secure key for valid AES secure key */ - rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0); - if (rc) - goto out; - if (pattributes) - *pattributes = PKEY_VERIFY_ATTR_AES; - if (pkeysize) - *pkeysize = t->bitsize; - - /* try to find a card which can handle this key */ - rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1); - if (rc < 0) - goto out; - - if (rc > 0) { - /* key mkvp matches to old master key mkvp */ - DEBUG_DBG("%s secure key has old mkvp\n", __func__); - if (pattributes) - *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; - rc = 0; - } - - if (pcardnr) - *pcardnr = cardnr; - if (pdomain) - *pdomain = domain; + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + + apqn.card = ksp.cardnr; + apqn.domain = ksp.domain; + ksp.protkey.len = sizeof(ksp.protkey.protkey); + rc = pkey_handler_key_to_protkey(&apqn, 1, + ksp.seckey.seckey, + sizeof(ksp.seckey.seckey), + ksp.protkey.protkey, + &ksp.protkey.len, &ksp.protkey.type, + 0); + pr_debug("key_to_protkey()=%d\n", rc); + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) + rc = -EFAULT; + memzero_explicit(&ksp, sizeof(ksp)); -out: - DEBUG_DBG("%s rc=%d\n", __func__, rc); return rc; } -/* - * Generate a random protected key - */ -static int pkey_genprotkey(u32 keytype, u8 *protkey, - u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) { - u8 clrkey[32]; - int keysize; + struct pkey_clr2protk kcp; + struct clearkeytoken *t; + u32 keylen; + u8 *tmpbuf; int rc; - keysize = pkey_keytype_aes_to_size(keytype); - if (!keysize) { - DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, - keytype); + if (copy_from_user(&kcp, ucp, sizeof(kcp))) + return -EFAULT; + + /* build a 'clear key token' from the clear key value */ + keylen = pkey_keytype_aes_to_size(kcp.keytype); + if (!keylen) { + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, kcp.keytype); + memzero_explicit(&kcp, sizeof(kcp)); return -EINVAL; } + tmpbuf = kzalloc(sizeof(*t) + keylen, GFP_KERNEL); + if (!tmpbuf) { + memzero_explicit(&kcp, sizeof(kcp)); + return -ENOMEM; + } + t 
= (struct clearkeytoken *)tmpbuf; + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_CLEAR_KEY; + t->keytype = (keylen - 8) >> 3; + t->len = keylen; + memcpy(t->clearkey, kcp.clrkey.clrkey, keylen); + kcp.protkey.len = sizeof(kcp.protkey.protkey); - /* generate a dummy random clear key */ - get_random_bytes(clrkey, keysize); + rc = key2protkey(NULL, 0, + tmpbuf, sizeof(*t) + keylen, + kcp.protkey.protkey, + &kcp.protkey.len, &kcp.protkey.type, 0); + pr_debug("key2protkey()=%d\n", rc); - /* convert it to a dummy protected key */ - rc = pkey_clr2protkey(keytype, clrkey, - protkey, protkeylen, protkeytype); - if (rc) - return rc; + kfree_sensitive(tmpbuf); - /* replace the key part of the protected key with random bytes */ - get_random_bytes(protkey, keysize); + if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp))) + rc = -EFAULT; + memzero_explicit(&kcp, sizeof(kcp)); - return 0; + return rc; } -/* - * Verify if a protected key is still valid - */ -static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, - u32 protkeytype) +static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) { - struct { - u8 iv[AES_BLOCK_SIZE]; - u8 key[MAXPROTKEYSIZE]; - } param; - u8 null_msg[AES_BLOCK_SIZE]; - u8 dest_buf[AES_BLOCK_SIZE]; - unsigned int k, pkeylen; - unsigned long fc; - - switch (protkeytype) { - case PKEY_KEYTYPE_AES_128: - pkeylen = 16 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_128; - break; - case PKEY_KEYTYPE_AES_192: - pkeylen = 24 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_192; - break; - case PKEY_KEYTYPE_AES_256: - pkeylen = 32 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_256; - break; - default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__, - protkeytype); - return -EINVAL; - } - if (protkeylen != pkeylen) { - DEBUG_ERR("%s invalid protected key size %u for keytype %u\n", - __func__, protkeylen, protkeytype); - return -EINVAL; - } + struct pkey_findcard kfc; + struct pkey_apqn *apqns; + size_t nr_apqns; + int rc; - memset(null_msg, 0, sizeof(null_msg)); + if (copy_from_user(&kfc, ufc, sizeof(kfc))) + return -EFAULT; - memset(param.iv, 0, sizeof(param.iv)); - memcpy(param.key, protkey, protkeylen); + nr_apqns = MAXAPQNSINLIST; + apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL); + if (!apqns) + return -ENOMEM; - k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, - sizeof(null_msg)); - if (k != sizeof(null_msg)) { - DEBUG_ERR("%s protected key is not valid\n", __func__); - return -EKEYREJECTED; + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, + sizeof(kfc.seckey.seckey), + PKEY_FLAGS_MATCH_CUR_MKVP, + apqns, &nr_apqns, 0); + if (rc == -ENODEV) + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, + sizeof(kfc.seckey.seckey), + PKEY_FLAGS_MATCH_ALT_MKVP, + apqns, &nr_apqns, 0); + pr_debug("apqns_for_key()=%d\n", rc); + if (rc) { + kfree(apqns); + return rc; } + kfc.cardnr = apqns[0].card; + kfc.domain = apqns[0].domain; + kfree(apqns); + if (copy_to_user(ufc, &kfc, sizeof(kfc))) + return -EFAULT; return 0; } -/* Helper for pkey_nonccatok2pkey, handles aes clear key token */ -static int nonccatokaes2pkey(const struct clearkeytoken *t, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) { - size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE); - u8 *tmpbuf = NULL; - u32 keysize; + struct pkey_skey2pkey ksp; int rc; - keysize = pkey_keytype_aes_to_size(t->keytype); - if (!keysize) { - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); - return
-EINVAL; - } - if (t->len != keysize) { - DEBUG_ERR("%s non clear key aes token: invalid key len %u\n", - __func__, t->len); - return -EINVAL; - } + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + + ksp.protkey.len = sizeof(ksp.protkey.protkey); + rc = pkey_handler_key_to_protkey(NULL, 0, + ksp.seckey.seckey, + sizeof(ksp.seckey.seckey), + ksp.protkey.protkey, + &ksp.protkey.len, + &ksp.protkey.type, 0); + pr_debug("key_to_protkey()=%d\n", rc); + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) + rc = -EFAULT; + memzero_explicit(&ksp, sizeof(ksp)); - /* try direct way with the PCKMO instruction */ - rc = pkey_clr2protkey(t->keytype, t->clearkey, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; - - /* PCKMO failed, so try the CCA secure key way */ - tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - zcrypt_wait_api_operational(); - rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf); - if (rc) - goto try_via_ep11; - rc = pkey_skey2pkey(tmpbuf, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; - -try_via_ep11: - /* if the CCA way also failed, let's try via EP11 */ - rc = pkey_clr2ep11key(t->clearkey, t->len, - tmpbuf, &tmpbuflen); - if (rc) - goto failure; - rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; - -failure: - DEBUG_ERR("%s unable to build protected key from clear", __func__); - -out: - kfree(tmpbuf); return rc; } -/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */ -static int nonccatokecc2pkey(const struct clearkeytoken *t, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) { - u32 keylen; + u32 keytype, keybitsize, flags; + struct pkey_verifykey kvk; int rc; - switch (t->keytype) { - case PKEY_KEYTYPE_ECC_P256: - keylen = 32; - break; - case PKEY_KEYTYPE_ECC_P384: - keylen = 48; - break; - case PKEY_KEYTYPE_ECC_P521: - keylen = 80; - break; - case PKEY_KEYTYPE_ECC_ED25519: - keylen = 32; - break; - case PKEY_KEYTYPE_ECC_ED448: - keylen = 64; - break; - default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); - return -EINVAL; - } + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; - if (t->len != keylen) { - DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n", - __func__, t->len); - return -EINVAL; - } - - /* only one path possible: via PCKMO instruction */ - rc = pkey_clr2protkey(t->keytype, t->clearkey, - protkey, protkeylen, protkeytype); - if (rc) { - DEBUG_ERR("%s unable to build protected key from clear", - __func__); - } + kvk.cardnr = 0xFFFF; + kvk.domain = 0xFFFF; + rc = pkey_handler_verify_key(kvk.seckey.seckey, + sizeof(kvk.seckey.seckey), + &kvk.cardnr, &kvk.domain, + &keytype, &keybitsize, &flags, 0); + pr_debug("verify_key()=%d\n", rc); + if (!rc && keytype != PKEY_TYPE_CCA_DATA) + rc = -EINVAL; + kvk.attributes = PKEY_VERIFY_ATTR_AES; + kvk.keysize = (u16)keybitsize; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP; + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) + rc = -EFAULT; + memzero_explicit(&kvk, sizeof(kvk)); return rc; } -/* - * Transform a non-CCA key token into a protected key - */ -static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - int rc = -EINVAL; + struct 
pkey_genprotk kgp; + int rc; - switch (hdr->version) { - case TOKVER_PROTECTED_KEY: { - struct protaeskeytoken *t; + if (copy_from_user(&kgp, ugp, sizeof(kgp))) + return -EFAULT; - if (keylen != sizeof(struct protaeskeytoken)) - goto out; - t = (struct protaeskeytoken *)key; - rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype); - if (rc) - goto out; - memcpy(protkey, t->protkey, t->len); - *protkeylen = t->len; - *protkeytype = t->keytype; - break; - } - case TOKVER_CLEAR_KEY: { - struct clearkeytoken *t = (struct clearkeytoken *)key; - - if (keylen < sizeof(struct clearkeytoken) || - keylen != sizeof(*t) + t->len) - goto out; - switch (t->keytype) { - case PKEY_KEYTYPE_AES_128: - case PKEY_KEYTYPE_AES_192: - case PKEY_KEYTYPE_AES_256: - rc = nonccatokaes2pkey(t, protkey, - protkeylen, protkeytype); - break; - case PKEY_KEYTYPE_ECC_P256: - case PKEY_KEYTYPE_ECC_P384: - case PKEY_KEYTYPE_ECC_P521: - case PKEY_KEYTYPE_ECC_ED25519: - case PKEY_KEYTYPE_ECC_ED448: - rc = nonccatokecc2pkey(t, protkey, - protkeylen, protkeytype); - break; - default: - DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n", - __func__, t->keytype); - return -EINVAL; - } - break; - } - case TOKVER_EP11_AES: { - /* check ep11 key for exportable as protected key */ - rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); - if (rc) - goto out; - rc = pkey_ep11key2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - } - case TOKVER_EP11_AES_WITH_HEADER: - /* check ep11 key with header for exportable as protected key */ - rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1); - if (rc) - goto out; - rc = pkey_ep11key2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - default: - DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", - __func__, hdr->version); - } + kgp.protkey.len = sizeof(kgp.protkey.protkey); + rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, + PKEY_TYPE_PROTKEY, 0, 0, + kgp.protkey.protkey, &kgp.protkey.len, + &kgp.protkey.type, 0); + pr_debug("gen_key()=%d\n", rc); + if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) + rc = -EFAULT; + memzero_explicit(&kgp, sizeof(kgp)); -out: return rc; } -/* - * Transform a CCA internal key token into a protected key - */ -static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_verifyprotk kvp; + struct protaeskeytoken *t; + u32 keytype; + u8 *tmpbuf; + int rc; - switch (hdr->version) { - case TOKVER_CCA_AES: - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - break; - case TOKVER_CCA_VLSC: - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - break; - default: - DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n", - __func__, hdr->version); + if (copy_from_user(&kvp, uvp, sizeof(kvp))) + return -EFAULT; + + keytype = pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len); + if (!keytype) { + PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n", + __func__, kvp.protkey.len); + memzero_explicit(&kvp, sizeof(kvp)); return -EINVAL; } - return pkey_skey2pkey(key, protkey, protkeylen, protkeytype); + /* build a 'protected key token' from the raw protected key */ + tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL); + if (!tmpbuf) { + memzero_explicit(&kvp, sizeof(kvp)); + return -ENOMEM; + } + t = (struct protaeskeytoken *)tmpbuf; + t->type = TOKTYPE_NON_CCA; + t->version = 
TOKVER_PROTECTED_KEY; + t->keytype = keytype; + t->len = kvp.protkey.len; + memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); + + rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), + NULL, NULL, NULL, NULL, NULL, 0); + pr_debug("verify_key()=%d\n", rc); + + kfree_sensitive(tmpbuf); + memzero_explicit(&kvp, sizeof(kvp)); + + return rc; } -/* - * Transform a key blob (of any type) into a protected key - */ -int pkey_keyblob2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_kblob2pkey ktp; + u8 *kkey; int rc; - if (keylen < sizeof(struct keytoken_header)) { - DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); - return -EINVAL; - } - - switch (hdr->type) { - case TOKTYPE_NON_CCA: - rc = pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - case TOKTYPE_CCA_INTERNAL: - rc = pkey_ccainttok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - default: - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); - return -EINVAL; - } + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); + ktp.protkey.len = sizeof(ktp.protkey.protkey); + rc = key2protkey(NULL, 0, kkey, ktp.keylen, + ktp.protkey.protkey, &ktp.protkey.len, + &ktp.protkey.type, 0); + pr_debug("key2protkey()=%d\n", rc); + kfree_sensitive(kkey); + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) + rc = -EFAULT; + memzero_explicit(&ktp, sizeof(ktp)); - DEBUG_DBG("%s rc=%d\n", __func__, rc); return rc; } -EXPORT_SYMBOL(pkey_keyblob2pkey); -static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, - enum pkey_key_type ktype, enum pkey_key_size ksize, - u32 kflags, u8 *keybuf, size_t *keybufsize) +static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) { - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; + u32 klen = KEYBLOBBUFSIZE; + struct pkey_genseck2 kgs; + struct pkey_apqn *apqns; + u8 *kkey; + int rc; + u32 u; - /* check key type and size */ - switch (ktype) { - case PKEY_TYPE_CCA_DATA: - case PKEY_TYPE_CCA_CIPHER: - if (*keybufsize < SECKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11: - if (*keybufsize < MINEP11AESKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11_AES: - if (*keybufsize < (sizeof(struct ep11kblob_header) + - MINEP11AESKEYBLOBSIZE)) - return -EINVAL; - break; - default: + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + u = pkey_aes_bitsize_to_keytype(kgs.size); + if (!u) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, kgs.size); return -EINVAL; } - switch (ksize) { - case PKEY_SIZE_AES_128: - case PKEY_SIZE_AES_192: - case PKEY_SIZE_AES_256: - break; - default: - return -EINVAL; + apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = kzalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); + return -ENOMEM; } - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES) { - rc = ep11_genaeskey(card, dom, ksize, kflags, - keybuf, keybufsize, ktype); - } else if (ktype == PKEY_TYPE_CCA_DATA) { - rc = cca_genseckey(card, 
dom, ksize, keybuf); - *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); - } else { - /* TOKVER_CCA_VLSC */ - rc = cca_gencipherkey(card, dom, ksize, kflags, - keybuf, keybufsize); + rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, + u, kgs.type, kgs.size, kgs.keygenflags, + kkey, &klen, NULL, 0); + pr_debug("gen_key()=%d\n", rc); + kfree(apqns); + if (rc) { + kfree_sensitive(kkey); + return rc; + } + if (kgs.key) { + if (kgs.keylen < klen) { + kfree_sensitive(kkey); + return -EINVAL; + } + if (copy_to_user(kgs.key, kkey, klen)) { + kfree_sensitive(kkey); + return -EFAULT; } - if (rc == 0) - break; } + kgs.keylen = klen; + if (copy_to_user(ugs, &kgs, sizeof(kgs))) + rc = -EFAULT; + kfree_sensitive(kkey); return rc; } -static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, - enum pkey_key_type ktype, enum pkey_key_size ksize, - u32 kflags, const u8 *clrkey, - u8 *keybuf, size_t *keybufsize) +static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) { - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - /* check key type and size */ - switch (ktype) { - case PKEY_TYPE_CCA_DATA: - case PKEY_TYPE_CCA_CIPHER: - if (*keybufsize < SECKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11: - if (*keybufsize < MINEP11AESKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11_AES: - if (*keybufsize < (sizeof(struct ep11kblob_header) + - MINEP11AESKEYBLOBSIZE)) - return -EINVAL; - break; - default: + u32 klen = KEYBLOBBUFSIZE; + struct pkey_clr2seck2 kcs; + struct pkey_apqn *apqns; + u8 *kkey; + int rc; + u32 u; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + u = pkey_aes_bitsize_to_keytype(kcs.size); + if (!u) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, kcs.size); + memzero_explicit(&kcs, sizeof(kcs)); return -EINVAL; } - switch (ksize) { - case PKEY_SIZE_AES_128: - case PKEY_SIZE_AES_192: - case PKEY_SIZE_AES_256: - break; - default: - return -EINVAL; + apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); + if (IS_ERR(apqns)) { + memzero_explicit(&kcs, sizeof(kcs)); + return PTR_ERR(apqns); } - - zcrypt_wait_api_operational(); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES) { - rc = ep11_clr2keyblob(card, dom, ksize, kflags, - clrkey, keybuf, keybufsize, - ktype); - } else if (ktype == PKEY_TYPE_CCA_DATA) { - rc = cca_clr2seckey(card, dom, ksize, - clrkey, keybuf); - *keybufsize = (rc ? 
0 : SECKEYBLOBSIZE); - } else { - /* TOKVER_CCA_VLSC */ - rc = cca_clr2cipherkey(card, dom, ksize, kflags, - clrkey, keybuf, keybufsize); + kkey = kzalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); + memzero_explicit(&kcs, sizeof(kcs)); + return -ENOMEM; + } + rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, + u, kcs.type, kcs.size, kcs.keygenflags, + kcs.clrkey.clrkey, kcs.size / 8, + kkey, &klen, NULL, 0); + pr_debug("clr_to_key()=%d\n", rc); + kfree(apqns); + if (rc) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return rc; + } + if (kcs.key) { + if (kcs.keylen < klen) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return -EINVAL; + } + if (copy_to_user(kcs.key, kkey, klen)) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return -EFAULT; } - if (rc == 0) - break; } + kcs.keylen = klen; + if (copy_to_user(ucs, &kcs, sizeof(kcs))) + rc = -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); + kfree_sensitive(kkey); return rc; } -static int pkey_verifykey2(const u8 *key, size_t keylen, - u16 *cardnr, u16 *domain, - enum pkey_key_type *ktype, - enum pkey_key_size *ksize, u32 *flags) +static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + struct pkey_verifykey2 kvk; + u8 *kkey; int rc; - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; - - if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) { - struct secaeskeytoken *t = (struct secaeskeytoken *)key; + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; + kkey = _copy_key_from_user(kvk.key, kvk.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); - rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_CCA_DATA; - if (ksize) - *ksize = (enum pkey_key_size)t->bitsize; - - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - if (rc == -ENODEV) { - rc = cca_findcard2(&_apqns, &_nr_apqns, - *cardnr, *domain, - ZCRYPT_CEX3C, AES_MK_SET, - 0, t->mkvp, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; - } - if (rc) - goto out; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - - } else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) { - struct cipherkeytoken *t = (struct cipherkeytoken *)key; - - rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_CCA_CIPHER; - if (ksize) { - *ksize = PKEY_SIZE_UNKNOWN; - if (!t->plfver && t->wpllen == 512) - *ksize = PKEY_SIZE_AES_128; - else if (!t->plfver && t->wpllen == 576) - *ksize = PKEY_SIZE_AES_192; - else if (!t->plfver && t->wpllen == 640) - *ksize = PKEY_SIZE_AES_256; - } - - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - if (rc == -ENODEV) { - rc = cca_findcard2(&_apqns, &_nr_apqns, - *cardnr, *domain, - ZCRYPT_CEX6, AES_MK_SET, - 0, t->mkvp0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; - } - if (rc) - goto out; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES) { - struct ep11keyblob *kb = 
(struct ep11keyblob *)key; - int api; - - rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_EP11; - if (ksize) - *ksize = kb->head.bitlen; - - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; - - if (flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; + rc = pkey_handler_verify_key(kkey, kvk.keylen, + &kvk.cardnr, &kvk.domain, + &kvk.type, &kvk.size, &kvk.flags, 0); + pr_debug("verify_key()=%d\n", rc); - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES_WITH_HEADER) { - struct ep11kblob_header *kh = (struct ep11kblob_header *)key; - int api; + kfree_sensitive(kkey); + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) + return -EFAULT; - rc = ep11_check_aes_key_with_hdr(debug_info, 3, - key, keylen, 1); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_EP11_AES; - if (ksize) - *ksize = kh->bitlen; - - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; + return rc; +} - if (flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; +static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) +{ + struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey2 ktp; + u8 *kkey; + int rc; - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - } else { - rc = -EINVAL; + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); } + ktp.protkey.len = sizeof(ktp.protkey.protkey); + rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, + ktp.protkey.protkey, &ktp.protkey.len, + &ktp.protkey.type, 0); + pr_debug("key2protkey()=%d\n", rc); + kfree(apqns); + kfree_sensitive(kkey); + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) + rc = -EFAULT; + memzero_explicit(&ktp, sizeof(ktp)); -out: - kfree(_apqns); return rc; } -static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, - const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; + struct pkey_apqn *apqns = NULL; + struct pkey_apqns4key kak; + size_t nr_apqns, len; + u8 *kkey; + int rc; - if (hdr->type == TOKTYPE_CCA_INTERNAL) { - if (hdr->version == TOKVER_CCA_AES) { - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) - return -EINVAL; - } else if (hdr->version == TOKVER_CCA_VLSC) { - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1)) - return -EINVAL; - } else { - DEBUG_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); + if (copy_from_user(&kak, uak, sizeof(kak))) + return -EFAULT; + nr_apqns = kak.apqn_entries; + 
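/*
 * Note on the PKEY_APQNS4K copy-out protocol visible below: the caller
 * passes the capacity of its APQN array in kak.apqn_entries and gets
 * the number of matching APQNs back in the same field. A handler
 * result of -ENOSPC is deliberately passed through so the caller can
 * learn the required count, grow its array and retry.
 */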
if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; + } + kkey = _copy_key_from_user(kak.key, kak.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); + } + rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, + apqns, &nr_apqns, 0); + pr_debug("apqns_for_key()=%d\n", rc); + kfree_sensitive(kkey); + if (rc && rc != -ENOSPC) { + kfree(apqns); + return rc; + } + if (!rc && kak.apqns) { + if (nr_apqns > kak.apqn_entries) { + kfree(apqns); return -EINVAL; } - } else if (hdr->type == TOKTYPE_NON_CCA) { - if (hdr->version == TOKVER_EP11_AES) { - if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) { - if (ep11_check_aes_key_with_hdr(debug_info, 3, - key, keylen, 1)) - return -EINVAL; - } else { - return pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, - protkeytype); - } - } else { - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); - return -EINVAL; - } - - zcrypt_wait_api_operational(); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) { - rc = cca_sec2protkey(card, dom, key, - protkey, protkeylen, protkeytype); - } else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) { - rc = cca_cipher2protkey(card, dom, key, - protkey, protkeylen, - protkeytype); - } else { - rc = ep11_kblob2protkey(card, dom, key, keylen, - protkey, protkeylen, - protkeytype); + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kak.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } } - if (rc == 0) - break; } + kak.apqn_entries = nr_apqns; + if (copy_to_user(uak, &kak, sizeof(kak))) + rc = -EFAULT; + kfree(apqns); return rc; } -static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) +static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + struct pkey_apqn *apqns = NULL; + struct pkey_apqns4keytype kat; + size_t nr_apqns, len; int rc; - if (keylen < sizeof(struct keytoken_header) || flags == 0) - return -EINVAL; - - zcrypt_wait_api_operational(); - - if (hdr->type == TOKTYPE_NON_CCA && - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - struct ep11keyblob *kb = (struct ep11keyblob *) - (key + sizeof(struct ep11kblob_header)); - int minhwtype = 0, api = 0; - - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) - return -EINVAL; - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { - minhwtype = ZCRYPT_CEX7; - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) { - struct ep11keyblob *kb = (struct ep11keyblob *)key; - int minhwtype = 0, api = 0; - - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) - return -EINVAL; - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { - minhwtype = ZCRYPT_CEX7; - api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; - } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { - u64 cur_mkvp = 0, old_mkvp = 0; - int minhwtype = ZCRYPT_CEX3C; - - if (hdr->version == TOKVER_CCA_AES) { - struct secaeskeytoken *t = (struct secaeskeytoken *)key; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp; - } else if (hdr->version == TOKVER_CCA_VLSC) { - struct cipherkeytoken *t = (struct cipherkeytoken *)key; - - minhwtype = ZCRYPT_CEX6; - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp0; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp0; - } else { - /* unknown cca internal token type */ + if (copy_from_user(&kat, uat, sizeof(kat))) + return -EFAULT; + nr_apqns = kat.apqn_entries; + if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; + } + rc = pkey_handler_apqns_for_keytype(kat.type, + kat.cur_mkvp, kat.alt_mkvp, + kat.flags, apqns, &nr_apqns, 0); + pr_debug("apqns_for_keytype()=%d\n", rc); + if (rc && rc != -ENOSPC) { + kfree(apqns); + return rc; + } + if (!rc && kat.apqns) { + if (nr_apqns > kat.apqn_entries) { + kfree(apqns); return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { - struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; - u64 cur_mkvp = 0, old_mkvp = 0; - - if (t->secid == 0x20) { - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp; - } else { - /* unknown cca internal 2 token type */ - return -EINVAL; + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kat.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else { - return -EINVAL; } + kat.apqn_entries = nr_apqns; + if (copy_to_user(uat, &kat, sizeof(kat))) + rc = -EFAULT; + kfree(apqns); - if (apqns) { - if (*nr_apqns < _nr_apqns) - rc = -ENOSPC; - else - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); - } - *nr_apqns = _nr_apqns; - -out: - kfree(_apqns); return rc; } -static int pkey_apqns4keytype(enum pkey_key_type ktype, - u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) +static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) { - u32 _nr_apqns, *_apqns = NULL; + u32 protkeylen = PROTKEYBLOBBUFSIZE; + struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey3 ktp; + u8 *kkey, *protkey; int rc; - zcrypt_wait_api_operational(); - - if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { - u64 cur_mkvp = 0, old_mkvp = 0; - int minhwtype = ZCRYPT_CEX3C; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = *((u64 *)cur_mkvp); - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = *((u64 *)alt_mkvp); - if (ktype == PKEY_TYPE_CCA_CIPHER) - minhwtype = ZCRYPT_CEX6; - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else if (ktype == PKEY_TYPE_CCA_ECC) { - u64 cur_mkvp = 0, old_mkvp = 0; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = *((u64 *)cur_mkvp); - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - 
old_mkvp = *((u64 *)alt_mkvp); - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - - } else if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES || - ktype == PKEY_TYPE_EP11_ECC) { - u8 *wkvp = NULL; - int api; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - wkvp = cur_mkvp; - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, api, wkvp); - if (rc) - goto out; - - } else { - return -EINVAL; + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); } - - if (apqns) { - if (*nr_apqns < _nr_apqns) - rc = -ENOSPC; - else - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + protkey = kmalloc(protkeylen, GFP_KERNEL); + if (!protkey) { + kfree(apqns); + kfree_sensitive(kkey); + return -ENOMEM; } - *nr_apqns = _nr_apqns; - -out: - kfree(_apqns); - return rc; -} - -static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, - const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) -{ - struct keytoken_header *hdr = (struct keytoken_header *)key; - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; - - if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES_WITH_HEADER && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - /* EP11 AES key blob with header */ - if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_ECC_WITH_HEADER && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - /* EP11 ECC key blob with header */ - if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) { - /* EP11 AES key blob with header in session field */ - if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { - if (hdr->version == TOKVER_CCA_AES) { - /* CCA AES data key */ - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) - return -EINVAL; - } else if (hdr->version == TOKVER_CCA_VLSC) { - /* CCA AES cipher key */ - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1)) - return -EINVAL; - } else { - DEBUG_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); - return -EINVAL; - } - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { - /* CCA ECC (private) key */ - if (keylen < sizeof(struct eccprivkeytoken)) - return -EINVAL; - if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA) { - return pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - } else { - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); - return -EINVAL; + rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, + protkey, &protkeylen, &ktp.pkeytype, 0); + pr_debug("key2protkey()=%d\n", rc); + 
kfree(apqns); + kfree_sensitive(kkey); + if (rc) { + kfree_sensitive(protkey); + return rc; } - - /* simple try all apqns from the list */ - for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (hdr->type == TOKTYPE_NON_CCA && - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) - rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, - protkeytype); - else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) - rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, - protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) - rc = cca_sec2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) - rc = cca_cipher2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) - rc = cca_ecc2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else + if (ktp.pkey && ktp.pkeylen) { + if (protkeylen > ktp.pkeylen) { + kfree_sensitive(protkey); return -EINVAL; + } + if (copy_to_user(ktp.pkey, protkey, protkeylen)) { + kfree_sensitive(protkey); + return -EFAULT; + } } + kfree_sensitive(protkey); + ktp.pkeylen = protkeylen; + if (copy_to_user(utp, &ktp, sizeof(ktp))) + return -EFAULT; - return rc; -} - -/* - * File io functions - */ - -static void *_copy_key_from_user(void __user *ukey, size_t keylen) -{ - if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) - return ERR_PTR(-EINVAL); - - return memdup_user(ukey, keylen); -} - -static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) -{ - if (!uapqns || nr_apqns == 0) - return NULL; - - return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); + return 0; } static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, @@ -1342,445 +712,57 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, int rc; switch (cmd) { - case PKEY_GENSECK: { - struct pkey_genseck __user *ugs = (void __user *)arg; - struct pkey_genseck kgs; - - if (copy_from_user(&kgs, ugs, sizeof(kgs))) - return -EFAULT; - rc = cca_genseckey(kgs.cardnr, kgs.domain, - kgs.keytype, kgs.seckey.seckey); - DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(ugs, &kgs, sizeof(kgs))) - return -EFAULT; + case PKEY_GENSECK: + rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg); break; - } - case PKEY_CLR2SECK: { - struct pkey_clr2seck __user *ucs = (void __user *)arg; - struct pkey_clr2seck kcs; - - if (copy_from_user(&kcs, ucs, sizeof(kcs))) - return -EFAULT; - rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, - kcs.clrkey.clrkey, kcs.seckey.seckey); - DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(ucs, &kcs, sizeof(kcs))) - return -EFAULT; - memzero_explicit(&kcs, sizeof(kcs)); + case PKEY_CLR2SECK: + rc = pkey_ioctl_clr2seck((struct pkey_clr2seck __user *)arg); break; - } - case PKEY_SEC2PROTK: { - struct pkey_sec2protk __user *usp = (void __user *)arg; - struct pkey_sec2protk ksp; - - if (copy_from_user(&ksp, usp, sizeof(ksp))) - return -EFAULT; - ksp.protkey.len = sizeof(ksp.protkey.protkey); - rc = cca_sec2protkey(ksp.cardnr, ksp.domain, - ksp.seckey.seckey, ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); 
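/*
 * The case body being removed here survives as pkey_ioctl_sec2protk()
 * further up: the hard-wired cca_sec2protkey() call is replaced by
 * pkey_handler_key_to_protkey(), which resolves the request through
 * the handler registry that pkey_base.c introduces below.
 */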
- DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(usp, &ksp, sizeof(ksp))) - return -EFAULT; + case PKEY_SEC2PROTK: + rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg); break; - } - case PKEY_CLR2PROTK: { - struct pkey_clr2protk __user *ucp = (void __user *)arg; - struct pkey_clr2protk kcp; - - if (copy_from_user(&kcp, ucp, sizeof(kcp))) - return -EFAULT; - kcp.protkey.len = sizeof(kcp.protkey.protkey); - rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, - kcp.protkey.protkey, - &kcp.protkey.len, &kcp.protkey.type); - DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(ucp, &kcp, sizeof(kcp))) - return -EFAULT; - memzero_explicit(&kcp, sizeof(kcp)); + case PKEY_CLR2PROTK: + rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg); break; - } - case PKEY_FINDCARD: { - struct pkey_findcard __user *ufc = (void __user *)arg; - struct pkey_findcard kfc; - - if (copy_from_user(&kfc, ufc, sizeof(kfc))) - return -EFAULT; - rc = cca_findcard(kfc.seckey.seckey, - &kfc.cardnr, &kfc.domain, 1); - DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc); - if (rc < 0) - break; - if (copy_to_user(ufc, &kfc, sizeof(kfc))) - return -EFAULT; + case PKEY_FINDCARD: + rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg); break; - } - case PKEY_SKEY2PKEY: { - struct pkey_skey2pkey __user *usp = (void __user *)arg; - struct pkey_skey2pkey ksp; - - if (copy_from_user(&ksp, usp, sizeof(ksp))) - return -EFAULT; - ksp.protkey.len = sizeof(ksp.protkey.protkey); - rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); - DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(usp, &ksp, sizeof(ksp))) - return -EFAULT; + case PKEY_SKEY2PKEY: + rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg); break; - } - case PKEY_VERIFYKEY: { - struct pkey_verifykey __user *uvk = (void __user *)arg; - struct pkey_verifykey kvk; - - if (copy_from_user(&kvk, uvk, sizeof(kvk))) - return -EFAULT; - rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, - &kvk.keysize, &kvk.attributes); - DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(uvk, &kvk, sizeof(kvk))) - return -EFAULT; + case PKEY_VERIFYKEY: + rc = pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg); break; - } - case PKEY_GENPROTK: { - struct pkey_genprotk __user *ugp = (void __user *)arg; - struct pkey_genprotk kgp; - - if (copy_from_user(&kgp, ugp, sizeof(kgp))) - return -EFAULT; - kgp.protkey.len = sizeof(kgp.protkey.protkey); - rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, - &kgp.protkey.len, &kgp.protkey.type); - DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); - if (rc) - break; - if (copy_to_user(ugp, &kgp, sizeof(kgp))) - return -EFAULT; + case PKEY_GENPROTK: + rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg); break; - } - case PKEY_VERIFYPROTK: { - struct pkey_verifyprotk __user *uvp = (void __user *)arg; - struct pkey_verifyprotk kvp; - - if (copy_from_user(&kvp, uvp, sizeof(kvp))) - return -EFAULT; - rc = pkey_verifyprotkey(kvp.protkey.protkey, - kvp.protkey.len, kvp.protkey.type); - DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc); + case PKEY_VERIFYPROTK: + rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg); break; - } - case PKEY_KBLOB2PROTK: { - struct pkey_kblob2pkey __user *utp = (void __user *)arg; - struct pkey_kblob2pkey ktp; - u8 *kkey; - - if 
(copy_from_user(&ktp, utp, sizeof(ktp))) - return -EFAULT; - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) - return PTR_ERR(kkey); - ktp.protkey.len = sizeof(ktp.protkey.protkey); - rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, - &ktp.protkey.len, &ktp.protkey.type); - DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); - memzero_explicit(kkey, ktp.keylen); - kfree(kkey); - if (rc) - break; - if (copy_to_user(utp, &ktp, sizeof(ktp))) - return -EFAULT; + case PKEY_KBLOB2PROTK: + rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg); break; - } - case PKEY_GENSECK2: { - struct pkey_genseck2 __user *ugs = (void __user *)arg; - size_t klen = KEYBLOBBUFSIZE; - struct pkey_genseck2 kgs; - struct pkey_apqn *apqns; - u8 *kkey; - - if (copy_from_user(&kgs, ugs, sizeof(kgs))) - return -EFAULT; - apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = kzalloc(klen, GFP_KERNEL); - if (!kkey) { - kfree(apqns); - return -ENOMEM; - } - rc = pkey_genseckey2(apqns, kgs.apqn_entries, - kgs.type, kgs.size, kgs.keygenflags, - kkey, &klen); - DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc); - kfree(apqns); - if (rc) { - kfree(kkey); - break; - } - if (kgs.key) { - if (kgs.keylen < klen) { - kfree(kkey); - return -EINVAL; - } - if (copy_to_user(kgs.key, kkey, klen)) { - kfree(kkey); - return -EFAULT; - } - } - kgs.keylen = klen; - if (copy_to_user(ugs, &kgs, sizeof(kgs))) - rc = -EFAULT; - kfree(kkey); + case PKEY_GENSECK2: + rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg); break; - } - case PKEY_CLR2SECK2: { - struct pkey_clr2seck2 __user *ucs = (void __user *)arg; - size_t klen = KEYBLOBBUFSIZE; - struct pkey_clr2seck2 kcs; - struct pkey_apqn *apqns; - u8 *kkey; - - if (copy_from_user(&kcs, ucs, sizeof(kcs))) - return -EFAULT; - apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = kzalloc(klen, GFP_KERNEL); - if (!kkey) { - kfree(apqns); - return -ENOMEM; - } - rc = pkey_clr2seckey2(apqns, kcs.apqn_entries, - kcs.type, kcs.size, kcs.keygenflags, - kcs.clrkey.clrkey, kkey, &klen); - DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc); - kfree(apqns); - if (rc) { - kfree(kkey); - break; - } - if (kcs.key) { - if (kcs.keylen < klen) { - kfree(kkey); - return -EINVAL; - } - if (copy_to_user(kcs.key, kkey, klen)) { - kfree(kkey); - return -EFAULT; - } - } - kcs.keylen = klen; - if (copy_to_user(ucs, &kcs, sizeof(kcs))) - rc = -EFAULT; - memzero_explicit(&kcs, sizeof(kcs)); - kfree(kkey); + case PKEY_CLR2SECK2: + rc = pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg); break; - } - case PKEY_VERIFYKEY2: { - struct pkey_verifykey2 __user *uvk = (void __user *)arg; - struct pkey_verifykey2 kvk; - u8 *kkey; - - if (copy_from_user(&kvk, uvk, sizeof(kvk))) - return -EFAULT; - kkey = _copy_key_from_user(kvk.key, kvk.keylen); - if (IS_ERR(kkey)) - return PTR_ERR(kkey); - rc = pkey_verifykey2(kkey, kvk.keylen, - &kvk.cardnr, &kvk.domain, - &kvk.type, &kvk.size, &kvk.flags); - DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc); - kfree(kkey); - if (rc) - break; - if (copy_to_user(uvk, &kvk, sizeof(kvk))) - return -EFAULT; + case PKEY_VERIFYKEY2: + rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg); break; - } - case PKEY_KBLOB2PROTK2: { - struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_kblob2pkey2 ktp; - u8 *kkey; - - if (copy_from_user(&ktp, utp, 
sizeof(ktp))) - return -EFAULT; - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - ktp.protkey.len = sizeof(ktp.protkey.protkey); - rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, - kkey, ktp.keylen, - ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); - DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); - kfree(apqns); - memzero_explicit(kkey, ktp.keylen); - kfree(kkey); - if (rc) - break; - if (copy_to_user(utp, &ktp, sizeof(ktp))) - return -EFAULT; + case PKEY_KBLOB2PROTK2: + rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg); break; - } - case PKEY_APQNS4K: { - struct pkey_apqns4key __user *uak = (void __user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_apqns4key kak; - size_t nr_apqns, len; - u8 *kkey; - - if (copy_from_user(&kak, uak, sizeof(kak))) - return -EFAULT; - nr_apqns = kak.apqn_entries; - if (nr_apqns) { - apqns = kmalloc_array(nr_apqns, - sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!apqns) - return -ENOMEM; - } - kkey = _copy_key_from_user(kak.key, kak.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - rc = pkey_apqns4key(kkey, kak.keylen, kak.flags, - apqns, &nr_apqns); - DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc); - kfree(kkey); - if (rc && rc != -ENOSPC) { - kfree(apqns); - break; - } - if (!rc && kak.apqns) { - if (nr_apqns > kak.apqn_entries) { - kfree(apqns); - return -EINVAL; - } - len = nr_apqns * sizeof(struct pkey_apqn); - if (len) { - if (copy_to_user(kak.apqns, apqns, len)) { - kfree(apqns); - return -EFAULT; - } - } - } - kak.apqn_entries = nr_apqns; - if (copy_to_user(uak, &kak, sizeof(kak))) - rc = -EFAULT; - kfree(apqns); + case PKEY_APQNS4K: + rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg); break; - } - case PKEY_APQNS4KT: { - struct pkey_apqns4keytype __user *uat = (void __user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_apqns4keytype kat; - size_t nr_apqns, len; - - if (copy_from_user(&kat, uat, sizeof(kat))) - return -EFAULT; - nr_apqns = kat.apqn_entries; - if (nr_apqns) { - apqns = kmalloc_array(nr_apqns, - sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!apqns) - return -ENOMEM; - } - rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, - kat.flags, apqns, &nr_apqns); - DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc); - if (rc && rc != -ENOSPC) { - kfree(apqns); - break; - } - if (!rc && kat.apqns) { - if (nr_apqns > kat.apqn_entries) { - kfree(apqns); - return -EINVAL; - } - len = nr_apqns * sizeof(struct pkey_apqn); - if (len) { - if (copy_to_user(kat.apqns, apqns, len)) { - kfree(apqns); - return -EFAULT; - } - } - } - kat.apqn_entries = nr_apqns; - if (copy_to_user(uat, &kat, sizeof(kat))) - rc = -EFAULT; - kfree(apqns); + case PKEY_APQNS4KT: + rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg); break; - } - case PKEY_KBLOB2PROTK3: { - struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; - u32 protkeylen = PROTKEYBLOBBUFSIZE; - struct pkey_apqn *apqns = NULL; - struct pkey_kblob2pkey3 ktp; - u8 *kkey, *protkey; - - if (copy_from_user(&ktp, utp, sizeof(ktp))) - return -EFAULT; - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - protkey = kmalloc(protkeylen, 
GFP_KERNEL); - if (!protkey) { - kfree(apqns); - kfree(kkey); - return -ENOMEM; - } - rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, - kkey, ktp.keylen, - protkey, &protkeylen, &ktp.pkeytype); - DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); - kfree(apqns); - memzero_explicit(kkey, ktp.keylen); - kfree(kkey); - if (rc) { - kfree(protkey); - break; - } - if (ktp.pkey && ktp.pkeylen) { - if (protkeylen > ktp.pkeylen) { - kfree(protkey); - return -EINVAL; - } - if (copy_to_user(ktp.pkey, protkey, protkeylen)) { - kfree(protkey); - return -EFAULT; - } - } - kfree(protkey); - ktp.pkeylen = protkeylen; - if (copy_to_user(utp, &ktp, sizeof(ktp))) - return -EFAULT; + case PKEY_KBLOB2PROTK3: + rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg); break; - } default: /* unknown/unsupported ioctl cmd */ return -ENOTTY; @@ -1790,499 +772,12 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } /* - * Sysfs and file io operations - */ - -/* - * Sysfs attribute read function for all protected key binary attributes. - * The implementation can not deal with partial reads, because a new random - * protected key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. - */ -static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, - loff_t off, size_t count) -{ - struct protaeskeytoken protkeytoken; - struct pkey_protkey protkey; - int rc; - - if (off != 0 || count < sizeof(protkeytoken)) - return -EINVAL; - if (is_xts) - if (count < 2 * sizeof(protkeytoken)) - return -EINVAL; - - memset(&protkeytoken, 0, sizeof(protkeytoken)); - protkeytoken.type = TOKTYPE_NON_CCA; - protkeytoken.version = TOKVER_PROTECTED_KEY; - protkeytoken.keytype = keytype; - - protkey.len = sizeof(protkey.protkey); - rc = pkey_genprotkey(protkeytoken.keytype, - protkey.protkey, &protkey.len, &protkey.type); - if (rc) - return rc; - - protkeytoken.len = protkey.len; - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); - - memcpy(buf, &protkeytoken, sizeof(protkeytoken)); - - if (is_xts) { - /* xts needs a second protected key, reuse protkey struct */ - protkey.len = sizeof(protkey.protkey); - rc = pkey_genprotkey(protkeytoken.keytype, - protkey.protkey, &protkey.len, &protkey.type); - if (rc) - return rc; - - protkeytoken.len = protkey.len; - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); - - memcpy(buf + sizeof(protkeytoken), &protkeytoken, - sizeof(protkeytoken)); - - return 2 * sizeof(protkeytoken); - } - - return sizeof(protkeytoken); -} - -static ssize_t protkey_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, - off, count); -} - -static ssize_t protkey_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, - off, count); -} - -static ssize_t protkey_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, - off, count); -} - -static ssize_t protkey_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, 
true, buf, - off, count); -} - -static ssize_t protkey_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken)); - -static struct bin_attribute *protkey_attrs[] = { - &bin_attr_protkey_aes_128, - &bin_attr_protkey_aes_192, - &bin_attr_protkey_aes_256, - &bin_attr_protkey_aes_128_xts, - &bin_attr_protkey_aes_256_xts, - NULL -}; - -static struct attribute_group protkey_attr_group = { - .name = "protkey", - .bin_attrs = protkey_attrs, -}; - -/* - * Sysfs attribute read function for all secure key ccadata binary attributes. - * The implementation can not deal with partial reads, because a new random - * protected key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + * File io operations */ -static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, - loff_t off, size_t count) -{ - struct pkey_seckey *seckey = (struct pkey_seckey *)buf; - int rc; - - if (off != 0 || count < sizeof(struct secaeskeytoken)) - return -EINVAL; - if (is_xts) - if (count < 2 * sizeof(struct secaeskeytoken)) - return -EINVAL; - - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); - if (rc) - return rc; - - if (is_xts) { - seckey++; - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); - if (rc) - return rc; - - return 2 * sizeof(struct secaeskeytoken); - } - - return sizeof(struct secaeskeytoken); -} - -static ssize_t ccadata_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, - off, count); -} - -static ssize_t ccadata_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, - off, count); -} - -static ssize_t ccadata_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, - off, count); -} - -static ssize_t ccadata_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, - off, count); -} - -static ssize_t ccadata_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); - -static struct bin_attribute 
*ccadata_attrs[] = { - &bin_attr_ccadata_aes_128, - &bin_attr_ccadata_aes_192, - &bin_attr_ccadata_aes_256, - &bin_attr_ccadata_aes_128_xts, - &bin_attr_ccadata_aes_256_xts, - NULL -}; - -static struct attribute_group ccadata_attr_group = { - .name = "ccadata", - .bin_attrs = ccadata_attrs, -}; - -#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) - -/* - * Sysfs attribute read function for all secure key ccacipher binary attributes. - * The implementation can not deal with partial reads, because a new random - * secure key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. - */ -static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, - bool is_xts, char *buf, loff_t off, - size_t count) -{ - size_t keysize = CCACIPHERTOKENSIZE; - u32 nr_apqns, *apqns = NULL; - int i, rc, card, dom; - - if (off != 0 || count < CCACIPHERTOKENSIZE) - return -EINVAL; - if (is_xts) - if (count < 2 * CCACIPHERTOKENSIZE) - return -EINVAL; - - /* build a list of apqns able to generate an cipher key */ - rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX6, 0, 0, 0, 0); - if (rc) - return rc; - - memset(buf, 0, is_xts ? 2 * keysize : keysize); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); - if (rc == 0) - break; - } - if (rc) - return rc; - - if (is_xts) { - keysize = CCACIPHERTOKENSIZE; - buf += CCACIPHERTOKENSIZE; - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); - if (rc == 0) - return 2 * CCACIPHERTOKENSIZE; - } - - return CCACIPHERTOKENSIZE; -} - -static ssize_t ccacipher_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, - off, count); -} - -static ssize_t ccacipher_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); - -static struct bin_attribute *ccacipher_attrs[] = { - &bin_attr_ccacipher_aes_128, - &bin_attr_ccacipher_aes_192, - &bin_attr_ccacipher_aes_256, - &bin_attr_ccacipher_aes_128_xts, - &bin_attr_ccacipher_aes_256_xts, - NULL -}; - -static struct attribute_group 
ccacipher_attr_group = { - .name = "ccacipher", - .bin_attrs = ccacipher_attrs, -}; - -/* - * Sysfs attribute read function for all ep11 aes key binary attributes. - * The implementation can not deal with partial reads, because a new random - * secure key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. - * This function and the sysfs attributes using it provide EP11 key blobs - * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently - * 336 bytes. - */ -static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, - bool is_xts, char *buf, loff_t off, - size_t count) -{ - size_t keysize = MAXEP11AESKEYBLOBSIZE; - u32 nr_apqns, *apqns = NULL; - int i, rc, card, dom; - - if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) - return -EINVAL; - if (is_xts) - if (count < 2 * MAXEP11AESKEYBLOBSIZE) - return -EINVAL; - - /* build a list of apqns able to generate an cipher key */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, - NULL); - if (rc) - return rc; - - memset(buf, 0, is_xts ? 2 * keysize : keysize); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, - PKEY_TYPE_EP11_AES); - if (rc == 0) - break; - } - if (rc) - return rc; - - if (is_xts) { - keysize = MAXEP11AESKEYBLOBSIZE; - buf += MAXEP11AESKEYBLOBSIZE; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, - PKEY_TYPE_EP11_AES); - if (rc == 0) - return 2 * MAXEP11AESKEYBLOBSIZE; - } - - return MAXEP11AESKEYBLOBSIZE; -} - -static ssize_t ep11_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, - off, count); -} - -static ssize_t ep11_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, - off, count); -} - -static ssize_t ep11_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, - off, count); -} - -static ssize_t ep11_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, - off, count); -} - -static ssize_t ep11_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); - -static struct bin_attribute *ep11_attrs[] = { - &bin_attr_ep11_aes_128, - &bin_attr_ep11_aes_192, - &bin_attr_ep11_aes_256, - &bin_attr_ep11_aes_128_xts, - &bin_attr_ep11_aes_256_xts, - NULL -}; - -static struct attribute_group ep11_attr_group = { - .name = "ep11", - .bin_attrs = ep11_attrs, -}; - -static const struct attribute_group 
*pkey_attr_groups[] = { - &protkey_attr_group, - &ccadata_attr_group, - &ccacipher_attr_group, - &ep11_attr_group, - NULL, -}; static const struct file_operations pkey_fops = { .owner = THIS_MODULE, .open = nonseekable_open, - .llseek = no_llseek, .unlocked_ioctl = pkey_unlocked_ioctl, }; @@ -2294,43 +789,13 @@ static struct miscdevice pkey_dev = { .groups = pkey_attr_groups, }; -/* - * Module init - */ -static int __init pkey_init(void) +int __init pkey_api_init(void) { - cpacf_mask_t func_mask; - - /* - * The pckmo instruction should be available - even if we don't - * actually invoke it. This instruction comes with MSA 3 which - * is also the minimum level for the kmc instructions which - * are able to work with protected keys. - */ - if (!cpacf_query(CPACF_PCKMO, &func_mask)) - return -ENODEV; - - /* check for kmc instructions available */ - if (!cpacf_query(CPACF_KMC, &func_mask)) - return -ENODEV; - if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) || - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) || - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256)) - return -ENODEV; - - pkey_debug_init(); - + /* register as a misc device */ return misc_register(&pkey_dev); } -/* - * Module exit - */ -static void __exit pkey_exit(void) +void __exit pkey_api_exit(void) { misc_deregister(&pkey_dev); - pkey_debug_exit(); } - -module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); -module_exit(pkey_exit); diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c new file mode 100644 index 000000000000..d60cd987c16d --- /dev/null +++ b/drivers/s390/crypto/pkey_base.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey base: debug feature, pkey handler registry + * + * Copyright IBM Corp. 2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/cpufeature.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/rculist.h> + +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key base and api"); + +/* + * pkey debug feature + */ +debug_info_t *pkey_dbf_info; +EXPORT_SYMBOL(pkey_dbf_info); + +/* + * pkey handler registry + */ + +static DEFINE_SPINLOCK(handler_list_write_lock); +static LIST_HEAD(handler_list); + +int pkey_handler_register(struct pkey_handler *handler) +{ + const struct pkey_handler *h; + + if (!handler || + !handler->is_supported_key || + !handler->is_supported_keytype) + return -EINVAL; + + if (!try_module_get(handler->module)) + return -ENXIO; + + spin_lock(&handler_list_write_lock); + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h == handler) { + rcu_read_unlock(); + spin_unlock(&handler_list_write_lock); + module_put(handler->module); + return -EEXIST; + } + } + rcu_read_unlock(); + + list_add_rcu(&handler->list, &handler_list); + spin_unlock(&handler_list_write_lock); + synchronize_rcu(); + + module_put(handler->module); + + PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__, + handler->name ?: "<no name>"); + + return 0; +} +EXPORT_SYMBOL(pkey_handler_register); + +int pkey_handler_unregister(struct pkey_handler *handler) +{ + spin_lock(&handler_list_write_lock); + list_del_rcu(&handler->list); + INIT_LIST_HEAD_RCU(&handler->list); + spin_unlock(&handler_list_write_lock); + synchronize_rcu(); + + PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__, + handler->name ?: "<no name>"); + + return 0; +} +EXPORT_SYMBOL(pkey_handler_unregister); + +/* + 
* Handler invocation functions. + */ + +const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen) +{ + const struct pkey_handler *h; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->is_supported_key(key, keylen)) { + rcu_read_unlock(); + return h; + } + module_put(h->module); + } + rcu_read_unlock(); + + return NULL; +} +EXPORT_SYMBOL(pkey_handler_get_keybased); + +const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt) +{ + const struct pkey_handler *h; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->is_supported_keytype(kt)) { + rcu_read_unlock(); + return h; + } + module_put(h->module); + } + rcu_read_unlock(); + + return NULL; +} +EXPORT_SYMBOL(pkey_handler_get_keytypebased); + +void pkey_handler_put(const struct pkey_handler *handler) +{ + const struct pkey_handler *h; + + if (!handler) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h == handler) { + module_put(h->module); + break; + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(pkey_handler_put); + +int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->key_to_protkey) { + rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, + protkey, protkeylen, + protkeytype, xflags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_key_to_protkey); + +/* + * This handler invocation is special as there may be more than + * one handler providing support for the very same key (type). + * A handler may also not respond true on is_supported_key(), + * so we simply try each handler and check the return value here.
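+ *
+ * Illustrative call (a sketch, not part of this patch; the buffer
+ * size constant is taken from pkey_base.h):
+ *
+ *	u8 pk[PROTKEYBLOBBUFSIZE];
+ *	u32 pklen = sizeof(pk), pktype = 0;
+ *	int rc;
+ *
+ *	rc = pkey_handler_slowpath_key_to_protkey(NULL, 0, key, keylen,
+ *						  pk, &pklen, &pktype, 0);
+ *	if (rc)
+ *		; /* no registered handler could convert this blob */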
+ */ +int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype, u32 xflags) +{ + const struct pkey_handler *h, *htmp[10]; + int i, n = 0, rc = -ENODEV; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp)) + htmp[n++] = h; + else + module_put(h->module); + } + rcu_read_unlock(); + + for (i = 0; i < n; i++) { + h = htmp[i]; + if (rc) + rc = h->slowpath_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype, xflags); + module_put(h->module); + } + + return rc; +} +EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); + +int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->gen_key) { + rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo, xflags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_gen_key); + +int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->clr_to_key) { + rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, + keybitsize, flags, clrkey, clrkeylen, + keybuf, keybuflen, keyinfo, xflags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_clr_to_key); + +int pkey_handler_verify_key(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->verify_key) { + rc = h->verify_key(key, keylen, card, dom, + keytype, keybitsize, flags, xflags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_verify_key); + +int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->apqns_for_key) + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns, + xflags); + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_apqns_for_key); + +int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->apqns_for_keytype) { + rc = h->apqns_for_keytype(keysubtype, + cur_mkvp, alt_mkvp, flags, + apqns, nr_apqns, xflags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_apqns_for_keytype); + +void pkey_handler_request_modules(void) +{ +#ifdef CONFIG_MODULES + static const char * const pkey_handler_modules[] = { +#if IS_MODULE(CONFIG_PKEY_CCA) + "pkey_cca", +#endif +#if IS_MODULE(CONFIG_PKEY_EP11) + "pkey_ep11", +#endif +#if IS_MODULE(CONFIG_PKEY_PCKMO) + "pkey_pckmo", 
+#endif +#if IS_MODULE(CONFIG_PKEY_UV) + "pkey_uv", +#endif + }; + int i; + + for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) { + const struct pkey_handler *h; + bool found = false; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h->module && + !strcmp(h->module->name, pkey_handler_modules[i])) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) { + pr_debug("request_module(%s)\n", pkey_handler_modules[i]); + request_module(pkey_handler_modules[i]); + } + } +#endif +} +EXPORT_SYMBOL(pkey_handler_request_modules); + +/* + * Module init + */ +static int __init pkey_init(void) +{ + int rc; + + /* init debug feature */ + pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); + debug_register_view(pkey_dbf_info, &debug_sprintf_view); + debug_set_level(pkey_dbf_info, 4); + + /* the handler registry does not need any init */ + + rc = pkey_api_init(); + if (rc) + debug_unregister(pkey_dbf_info); + + return rc; +} + +/* + * Module exit + */ +static void __exit pkey_exit(void) +{ + pkey_api_exit(); +} + +module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); +module_exit(pkey_exit); diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h new file mode 100644 index 000000000000..9cdb3e74477f --- /dev/null +++ b/drivers/s390/crypto/pkey_base.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright IBM Corp. 2024 + * + * Pkey base: debug feature, defines and structs + * common to all pkey code. + */ + +#ifndef _PKEY_BASE_H_ +#define _PKEY_BASE_H_ + +#include <linux/types.h> +#include <asm/debug.h> +#include <asm/pkey.h> + +/* + * pkey debug feature + */ + +extern debug_info_t *pkey_dbf_info; + +#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) +#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) +#define PKEY_DBF_ERR(...) 
debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) + +/* + * common defines and common structs + */ + +#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ +#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) +#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internally */ +#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */ +#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ + +/* inside view of a generic protected key token */ +struct protkeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* should be 0x01 for protected key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ + u32 len; /* bytes actually stored in protkey[] */ + u8 protkey[]; /* the protected key blob */ +} __packed; + +/* inside view of a protected AES key token */ +struct protaeskeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* should be 0x01 for protected key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ + u32 len; /* bytes actually stored in protkey[] */ + u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ +} __packed; + +/* inside view of a clear key token (type 0x00 version 0x02) */ +struct clearkeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* 0x02 for clear key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ + u32 len; /* bytes actually stored in clearkey[] */ + u8 clearkey[]; /* clear key value */ +} __packed; + +/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ +static inline u32 pkey_keytype_aes_to_size(u32 keytype) +{ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + return 16; + case PKEY_KEYTYPE_AES_192: + return 24; + case PKEY_KEYTYPE_AES_256: + return 32; + default: + return 0; + } +} + +/* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */ +static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize) +{ + switch (keybitsize) { + case 128: + return PKEY_KEYTYPE_AES_128; + case 192: + return PKEY_KEYTYPE_AES_192; + case 256: + return PKEY_KEYTYPE_AES_256; + default: + return 0; + } +} + +/* + * helper function which translates the PKEY_KEYTYPE_* + * to the protected key size minus the WK VP length + */ +static inline u32 pkey_keytype_to_size(u32 keytype) +{ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + return 16; + case PKEY_KEYTYPE_AES_192: + return 24; + case PKEY_KEYTYPE_AES_256: + return 32; + case PKEY_KEYTYPE_ECC_P256: + return 32; + case PKEY_KEYTYPE_ECC_P384: + return 48; + case PKEY_KEYTYPE_ECC_P521: + return 80; + case PKEY_KEYTYPE_ECC_ED25519: + return 32; + case PKEY_KEYTYPE_ECC_ED448: + return 64; + case PKEY_KEYTYPE_AES_XTS_128: + return 32; + case PKEY_KEYTYPE_AES_XTS_256: + return 64; + case PKEY_KEYTYPE_HMAC_512: + return 64; + case PKEY_KEYTYPE_HMAC_1024: + return 128; + default: + return 0; + } +} + +/* + * pkey_api.c: + */ +int __init pkey_api_init(void); +void __exit pkey_api_exit(void); + +/* + * pkey_sysfs.c: + */ + +extern const struct attribute_group *pkey_attr_groups[]; + +/* + * pkey handler registry + */ + +struct pkey_handler { + struct module *module; + const char *name; + /* + * is_supported_key() and is_supported_keytype() are called + * within an rcu_read_lock() scope and thus must not sleep! 
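+ *
+ * A handler module fills in this struct and hands it to
+ * pkey_handler_register(). A minimal sketch (the my_* callbacks are
+ * placeholders, not part of this patch):
+ *
+ *	static struct pkey_handler my_handler = {
+ *		.module = THIS_MODULE,
+ *		.name = "PKEY example handler",
+ *		.is_supported_key = my_is_supported_key,
+ *		.is_supported_keytype = my_is_supported_keytype,
+ *		.key_to_protkey = my_key2protkey,
+ *	};
+ *
+ *	rc = pkey_handler_register(&my_handler);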
+ */ + bool (*is_supported_key)(const u8 *key, u32 keylen); + bool (*is_supported_keytype)(enum pkey_key_type); + int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); + int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype, u32 xflags); + int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); + int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); + int (*verify_key)(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); + int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); + int (*apqns_for_keytype)(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); + /* used internal by pkey base */ + struct list_head list; +}; + +int pkey_handler_register(struct pkey_handler *handler); +int pkey_handler_unregister(struct pkey_handler *handler); + +/* + * invocation function for the registered pkey handlers + */ + +const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen); +const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt); +void pkey_handler_put(const struct pkey_handler *handler); + +int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); +int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype, u32 xflags); +int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); +int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags); +int pkey_handler_verify_key(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); +int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); +int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); + +/* + * Unconditional try to load all handler modules + */ +void pkey_handler_request_modules(void); + +#endif /* _PKEY_BASE_H_ */ diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c new file mode 100644 index 000000000000..d4550d8d8eea --- /dev/null +++ b/drivers/s390/crypto/pkey_cca.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey cca specific code + * + * Copyright IBM Corp. 
2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/cpufeature.h> + +#include "zcrypt_ccamisc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key CCA handler"); + +#if IS_MODULE(CONFIG_PKEY_CCA) +static struct ap_device_id pkey_cca_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4 }, + { .dev_type = AP_DEVICE_TYPE_CEX5 }, + { .dev_type = AP_DEVICE_TYPE_CEX6 }, + { .dev_type = AP_DEVICE_TYPE_CEX7 }, + { .dev_type = AP_DEVICE_TYPE_CEX8 }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids); +#endif + +/* + * Check key blob for known and supported CCA key. + */ +static bool is_cca_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_CCA_INTERNAL: + switch (hdr->version) { + case TOKVER_CCA_AES: + case TOKVER_CCA_VLSC: + return true; + default: + return false; + } + case TOKTYPE_CCA_INTERNAL_PKA: + return true; + default: + return false; + } +} + +static bool is_cca_keytype(enum pkey_key_type key_type) +{ + switch (key_type) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + case PKEY_TYPE_CCA_ECC: + return true; + default: + return false; + } +} + +static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (!flags) + flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; + + if (keylen < sizeof(struct keytoken_header)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_CCA_INTERNAL) { + u64 cur_mkvp = 0, old_mkvp = 0; + int minhwtype = ZCRYPT_CEX3C; + + if (hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else if (hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + minhwtype = ZCRYPT_CEX6; + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp0; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp0; + } else { + /* unknown CCA internal token type */ + return -EINVAL; + } + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, AES_MK_SET, + cur_mkvp, old_mkvp, xflags); + if (rc) + goto out; + + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; + u64 cur_mkvp = 0, old_mkvp = 0; + + if (t->secid == 0x20) { + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else { + /* unknown CCA internal 2 token type */ + return -EINVAL; + } + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mkvp, old_mkvp, xflags); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + pr_debug("rc=%d\n", rc); + 
return rc; +} + +static int cca_apqns4type(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 pflags) +{ + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + zcrypt_wait_api_operational(); + + if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { + u64 cur_mk = 0, old_mk = 0; + int minhwtype = ZCRYPT_CEX3C; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mk = *((u64 *)cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mk = *((u64 *)alt_mkvp); + if (ktype == PKEY_TYPE_CCA_CIPHER) + minhwtype = ZCRYPT_CEX6; + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, AES_MK_SET, + cur_mk, old_mk, xflags); + if (rc) + goto out; + + } else if (ktype == PKEY_TYPE_CCA_ECC) { + u64 cur_mk = 0, old_mk = 0; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mk = *((u64 *)cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mk = *((u64 *)alt_mkvp); + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mk, old_mk, xflags); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported key type %d\n", + __func__, (int)ktype); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; + int i, rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ?
ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + /* CCA AES data key */ + if (keylen < sizeof(struct secaeskeytoken)) + return -EINVAL; + if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + /* CCA AES cipher key */ + if (keylen < hdr->len) + return -EINVAL; + if (cca_check_secaescipherkey(pkey_dbf_info, + 3, key, 0, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + /* CCA ECC (private) key */ + if (keylen < sizeof(struct eccprivkeytoken)) + return -EINVAL; + if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) + return -EINVAL; + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype, xflags); + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype, xflags); + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype, xflags); + } else { + rc = -EINVAL; + break; + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate CCA secure key. + * As of now only CCA AES Data or Cipher secure keys are + * supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). + */ +static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) +{ + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + + /* check keytype, subtype, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + switch (subtype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = cca_apqns4type(subtype, NULL, NULL, 0, + _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (subtype == PKEY_TYPE_CCA_CIPHER) { + rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, + keybitsize, flags, + keybuf, keybuflen, xflags); + } else { + /* PKEY_TYPE_CCA_DATA */ + rc = cca_genseckey(apqns[i].card, apqns[i].domain, + keybitsize, keybuf, xflags); + *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate CCA secure key with given clear key value. + * As of now only CCA AES Data or Cipher secure keys are + * supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). + */ +static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) +{ + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + + /* check keytype, subtype, clrkeylen, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + if (clrkeylen != len) { + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", + __func__, clrkeylen, len); + return -EINVAL; + } + switch (subtype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = cca_apqns4type(subtype, NULL, NULL, 0, + _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (subtype == PKEY_TYPE_CCA_CIPHER) { + rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, + keybitsize, flags, clrkey, + keybuf, keybuflen, xflags); + } else { + /* PKEY_TYPE_CCA_DATA */ + rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, + keybitsize, clrkey, keybuf, xflags); + *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cca_verifykey(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); + if (rc) + goto out; + *keytype = PKEY_TYPE_CCA_DATA; + *keybitsize = t->bitsize; + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX3C, AES_MK_SET, + t->mkvp, 0, xflags); + if (!rc) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX3C, AES_MK_SET, + 0, t->mkvp, xflags); + if (!rc) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_CCA_CIPHER; + *keybitsize = PKEY_SIZE_UNKNOWN; + if (!t->plfver && t->wpllen == 512) + *keybitsize = PKEY_SIZE_AES_128; + else if (!t->plfver && t->wpllen == 576) + *keybitsize = PKEY_SIZE_AES_192; + else if (!t->plfver && t->wpllen == 640) + *keybitsize = PKEY_SIZE_AES_256; + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX6, AES_MK_SET, + t->mkvp0, 0, xflags); + if (!rc) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX6, AES_MK_SET, + 0, t->mkvp0, xflags); + if (!rc) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else { + /* unknown/unsupported key blob */ + rc = -EINVAL; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * This function provides an alternate but usually slow way + * to convert a 'clear key token' with AES key material into + * a protected key. This is done via an intermediate step + * which creates a CCA AES DATA secure key first and then + * derives the protected key from this secure key. 
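+ *
+ * Data flow (sketch of the loop below):
+ *	clearkeytoken (TOKTYPE_NON_CCA, TOKVER_CLEAR_KEY)
+ *	  --cca_clr2key()-->     CCA AES DATA secure key (SECKEYBLOBSIZE)
+ *	  --cca_key2protkey()--> protected key blob + WK VP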
+ */ +static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype, u32 pflags) +{ + const struct keytoken_header *hdr = (const struct keytoken_header *)key; + const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */ + u32 tmplen, keysize = 0; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_CLEAR_KEY) + keysize = pkey_keytype_aes_to_size(t->keytype); + if (!keysize || t->len != keysize) + return -EINVAL; + + /* try two times in case of failure */ + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { + tmplen = SECKEYBLOBSIZE; + rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, + 8 * keysize, 0, t->clearkey, t->len, + tmpbuf, &tmplen, NULL, pflags); + pr_debug("cca_clr2key()=%d\n", rc); + if (rc) + continue; + rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, + protkey, protkeylen, protkeytype, pflags); + pr_debug("cca_key2protkey()=%d\n", rc); + } + + pr_debug("rc=%d\n", rc); + return rc; +} + +static struct pkey_handler cca_handler = { + .module = THIS_MODULE, + .name = "PKEY CCA handler", + .is_supported_key = is_cca_key, + .is_supported_keytype = is_cca_keytype, + .key_to_protkey = cca_key2protkey, + .slowpath_key_to_protkey = cca_slowpath_key2protkey, + .gen_key = cca_gen_key, + .clr_to_key = cca_clr2key, + .verify_key = cca_verifykey, + .apqns_for_key = cca_apqns4key, + .apqns_for_keytype = cca_apqns4type, +}; + +/* + * Module init + */ +static int __init pkey_cca_init(void) +{ + /* register this module as pkey handler for all the cca stuff */ + return pkey_handler_register(&cca_handler); +} + +/* + * Module exit + */ +static void __exit pkey_cca_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&cca_handler); +} + +module_init(pkey_cca_init); +module_exit(pkey_cca_exit); diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c new file mode 100644 index 000000000000..654eed20d0d9 --- /dev/null +++ b/drivers/s390/crypto/pkey_ep11.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey ep11 specific code + * + * Copyright IBM Corp. 2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/cpufeature.h> + +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key EP11 handler"); + +#if IS_MODULE(CONFIG_PKEY_EP11) +static struct ap_device_id pkey_ep11_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4 }, + { .dev_type = AP_DEVICE_TYPE_CEX5 }, + { .dev_type = AP_DEVICE_TYPE_CEX6 }, + { .dev_type = AP_DEVICE_TYPE_CEX7 }, + { .dev_type = AP_DEVICE_TYPE_CEX8 }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids); +#endif + +/* + * Check key blob for known and supported EP11 key. 
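+ *
+ * Accepted here (mirrors the switch below): TOKTYPE_NON_CCA tokens
+ * with version TOKVER_EP11_AES, TOKVER_EP11_AES_WITH_HEADER or
+ * TOKVER_EP11_ECC_WITH_HEADER; all other blobs are rejected.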
+ */ +static bool is_ep11_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_NON_CCA: + switch (hdr->version) { + case TOKVER_EP11_AES: + case TOKVER_EP11_AES_WITH_HEADER: + case TOKVER_EP11_ECC_WITH_HEADER: + return true; + default: + return false; + } + default: + return false; + } +} + +static bool is_ep11_keytype(enum pkey_key_type key_type) +{ + switch (key_type) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + case PKEY_TYPE_EP11_ECC: + return true; + default: + return false; + } +} + +static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (!flags) + flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + if (keylen < sizeof(struct keytoken_header) || flags == 0) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_NON_CCA && + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + struct ep11keyblob *kb = (struct ep11keyblob *) + (key + sizeof(struct ep11kblob_header)); + int minhwtype = 0, api = 0; + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + } + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); + if (rc) + goto out; + + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + struct ep11keyblob *kb = (struct ep11keyblob *)key; + int minhwtype = 0, api = 0; + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + } + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_apqns4type(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) +{ + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + zcrypt_wait_api_operational(); + + if (ktype == PKEY_TYPE_EP11 || + ktype == PKEY_TYPE_EP11_AES || + ktype == PKEY_TYPE_EP11_ECC) { + u8 *wkvp = NULL; + int api; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + wkvp = cur_mkvp; + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, api, wkvp, xflags); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported key type %d\n", + __func__, (int)ktype); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; + int i, rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 AES key blob with header */ + if (ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 ECC key blob with header */ + if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + /* EP11 AES key blob with header in session field */ + if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) + return -EINVAL; + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype, xflags); + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype, xflags); + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype, xflags); + } else { + rc = -EINVAL; + break; + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate EP11 secure key. + * As of now only EP11 AES secure keys are supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES + * or 0 (results in subtype PKEY_TYPE_EP11_AES), + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). 
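+ *
+ * Example (sketch, not part of this patch): generate a 256 bit
+ * EP11 AES key on any suitable APQN:
+ *
+ *	u8 kb[MAXEP11AESKEYBLOBSIZE];
+ *	u32 kblen = sizeof(kb);
+ *
+ *	rc = ep11_gen_key(NULL, 0, PKEY_KEYTYPE_AES_256,
+ *			  PKEY_TYPE_EP11_AES, 256, 0,
+ *			  kb, &kblen, NULL, 0);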
+ */ +static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) +{ + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + /* check keytype, subtype, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + switch (subtype) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = ep11_apqns4type(subtype, NULL, NULL, 0, + _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, + keybitsize, flags, + keybuf, keybuflen, subtype, xflags); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate EP11 secure key with given clear key value. + * As of now only EP11 AES secure keys are supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES + * or 0 (assumes PKEY_TYPE_EP11_AES then). + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). + */ +static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) +{ + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + + /* check keytype, subtype, clrkeylen, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + if (clrkeylen != len) { + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", + __func__, clrkeylen, len); + return -EINVAL; + } + switch (subtype) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + rc = ep11_apqns4type(subtype, NULL, NULL, 0, + _apqns, &nr_apqns, pflags); + if (rc) + goto out; + apqns = _apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, + keybitsize, flags, clrkey, + keybuf, keybuflen, subtype, xflags); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_verifykey(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; + int rc; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES) { + struct ep11keyblob *kb = (struct ep11keyblob *)key; + int api; + + rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_EP11; + *keybitsize = kb->head.bitlen; + + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen), xflags); + if (rc) + goto out; + + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER) { + struct ep11kblob_header *kh = (struct ep11kblob_header *)key; + int api; + + rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_EP11_AES; + *keybitsize = kh->bitlen; + + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen), xflags); + if (rc) + goto out; + + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else { + /* unknown/unsupported key blob */ + rc = -EINVAL; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * This function provides an alternate but usually slow way + * to convert a 'clear key token' with AES key material into + * a protected key. That is done via an intermediate step + * which creates an EP11 AES secure key first and then derives + * the protected key from this secure key. 
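+ *
+ * Data flow (sketch of the loop below):
+ *	clearkeytoken (TOKTYPE_NON_CCA, TOKVER_CLEAR_KEY)
+ *	  --ep11_clr2key()-->     EP11 AES secure key blob
+ *	  --ep11_key2protkey()--> protected key blob + WK VP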
+ */ +static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype, u32 pflags) +{ + const struct keytoken_header *hdr = (const struct keytoken_header *)key; + const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */ + u32 tmplen, keysize = 0; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_CLEAR_KEY) + keysize = pkey_keytype_aes_to_size(t->keytype); + if (!keysize || t->len != keysize) + return -EINVAL; + + /* try two times in case of failure */ + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { + tmplen = MAXEP11AESKEYBLOBSIZE; + rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, + 8 * keysize, 0, t->clearkey, t->len, + tmpbuf, &tmplen, NULL, pflags); + pr_debug("ep11_clr2key()=%d\n", rc); + if (rc) + continue; + rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, + protkey, protkeylen, protkeytype, pflags); + pr_debug("ep11_key2protkey()=%d\n", rc); + } + + pr_debug("rc=%d\n", rc); + return rc; +} + +static struct pkey_handler ep11_handler = { + .module = THIS_MODULE, + .name = "PKEY EP11 handler", + .is_supported_key = is_ep11_key, + .is_supported_keytype = is_ep11_keytype, + .key_to_protkey = ep11_key2protkey, + .slowpath_key_to_protkey = ep11_slowpath_key2protkey, + .gen_key = ep11_gen_key, + .clr_to_key = ep11_clr2key, + .verify_key = ep11_verifykey, + .apqns_for_key = ep11_apqns4key, + .apqns_for_keytype = ep11_apqns4type, +}; + +/* + * Module init + */ +static int __init pkey_ep11_init(void) +{ + /* register this module as pkey handler for all the ep11 stuff */ + return pkey_handler_register(&ep11_handler); +} + +/* + * Module exit + */ +static void __exit pkey_ep11_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&ep11_handler); +} + +module_init(pkey_ep11_init); +module_exit(pkey_ep11_exit); diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c new file mode 100644 index 000000000000..793326c4c59a --- /dev/null +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey pckmo specific code + * + * Copyright IBM Corp. 2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/cpufeature.h> +#include <asm/cpacf.h> +#include <crypto/aes.h> +#include <linux/random.h> + +#include "zcrypt_ccamisc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key PCKMO handler"); + +/* + * Check key blob for known and supported here. + */ +static bool is_pckmo_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct clearkeytoken *t = (struct clearkeytoken *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_NON_CCA: + switch (hdr->version) { + case TOKVER_CLEAR_KEY: + if (pkey_keytype_to_size(t->keytype)) + return true; + return false; + case TOKVER_PROTECTED_KEY: + return true; + default: + return false; + } + default: + return false; + } +} + +static bool is_pckmo_keytype(enum pkey_key_type keytype) +{ + switch (keytype) { + case PKEY_TYPE_PROTKEY: + return true; + default: + return false; + } +} + +/* + * Create a protected key from a clear key value via PCKMO instruction. 
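+ *
+ * Parameter block handling (sketch of what the code below does):
+ * the clear key value is copied to offset 0 of a zeroed 160 byte
+ * parameter block, then the PCKMO instruction replaces it in place
+ * with the wrapped key followed by the 32 byte wrapping key
+ * verification pattern (WK VP):
+ *
+ *	[ clear key (keysize) | 0 ... 0 ]
+ *	  --cpacf_pckmo(fc)-->
+ *	[ protected key (keysize) | WK VP (AES_WK_VP_SIZE) | ... ]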
+ */ +static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + /* mask of available pckmo subfunctions */ + static cpacf_mask_t pckmo_functions; + + int keysize, rc = -EINVAL; + u8 paramblock[160]; + u32 pkeytype = 0; + unsigned int fc; + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_PCKMO_ENC_AES_128_KEY; + break; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_PCKMO_ENC_AES_192_KEY; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_PCKMO_ENC_AES_256_KEY; + break; + case PKEY_KEYTYPE_ECC_P256: + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P256_KEY; + break; + case PKEY_KEYTYPE_ECC_P384: + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P384_KEY; + break; + case PKEY_KEYTYPE_ECC_P521: + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P521_KEY; + break; + case PKEY_KEYTYPE_ECC_ED25519: + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; + break; + case PKEY_KEYTYPE_ECC_ED448: + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; + break; + case PKEY_KEYTYPE_AES_XTS_128: + fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY; + break; + case PKEY_KEYTYPE_AES_XTS_256: + fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY; + break; + case PKEY_KEYTYPE_HMAC_512: + fc = CPACF_PCKMO_ENC_HMAC_512_KEY; + break; + case PKEY_KEYTYPE_HMAC_1024: + fc = CPACF_PCKMO_ENC_HMAC_1024_KEY; + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, keytype); + goto out; + } + + keysize = pkey_keytype_to_size(keytype); + pkeytype = pkeytype ?: keytype; + + if (clrkeylen && clrkeylen < keysize) { + PKEY_DBF_ERR("%s clear key size too small: %u < %d\n", + __func__, clrkeylen, keysize); + goto out; + } + if (*protkeylen < keysize + AES_WK_VP_SIZE) { + PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); + goto out; + } + + /* Did we already check for PCKMO ? */ + if (!pckmo_functions.bytes[0]) { + /* no, so check now */ + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) { + PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__); + rc = -ENODEV; + goto out; + } + } + /* check for the pckmo subfunction we need now */ + if (!cpacf_test_func(&pckmo_functions, fc)) { + PKEY_DBF_ERR("%s pckmo fc 0x%02x not available\n", + __func__, fc); + rc = -ENODEV; + goto out; + } + + /* prepare param block */ + memset(paramblock, 0, sizeof(paramblock)); + memcpy(paramblock, clrkey, keysize); + + /* call the pckmo instruction */ + cpacf_pckmo(fc, paramblock); + + /* copy created protected key to key buffer including the wkvp block */ + *protkeylen = keysize + AES_WK_VP_SIZE; + memcpy(protkey, paramblock, *protkeylen); + *protkeytype = pkeytype; + + rc = 0; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Verify a raw protected key blob. 
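+ *
+ * Idea (as implemented below): wrap a dummy all-zero AES 128 clear
+ * key under the current wrapping key and compare the resulting WK VP
+ * with the WK VP stored in the given blob; a mismatch means the blob
+ * was created under a different (e.g. old) wrapping key and is
+ * rejected with -EKEYREJECTED.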
+ */ +static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen, + u32 protkeytype) +{ + u8 clrkey[16] = { 0 }, tmpkeybuf[16 + AES_WK_VP_SIZE]; + u32 tmpkeybuflen, tmpkeytype; + int keysize, rc = -EINVAL; + u8 *wkvp; + + /* check protkey type and size */ + keysize = pkey_keytype_to_size(protkeytype); + if (!keysize) { + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, + protkeytype); + goto out; + } + if (protkeylen < keysize + AES_WK_VP_SIZE) + goto out; + + /* generate a dummy AES 128 protected key */ + tmpkeybuflen = sizeof(tmpkeybuf); + rc = pckmo_clr2protkey(PKEY_KEYTYPE_AES_128, + clrkey, sizeof(clrkey), + tmpkeybuf, &tmpkeybuflen, &tmpkeytype); + if (rc) + goto out; + memzero_explicit(tmpkeybuf, 16); + wkvp = tmpkeybuf + 16; + + /* compare WK VP from the temp key with that of the given prot key */ + if (memcmp(wkvp, protkey + keysize, AES_WK_VP_SIZE)) { + PKEY_DBF_ERR("%s protected key WK VP mismatch\n", __func__); + rc = -EKEYREJECTED; + goto out; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int pckmo_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc = -EINVAL; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + if (hdr->type != TOKTYPE_NON_CCA) + return -EINVAL; + + switch (hdr->version) { + case TOKVER_PROTECTED_KEY: { + struct protkeytoken *t = (struct protkeytoken *)key; + u32 keysize; + + if (keylen < sizeof(*t)) + goto out; + keysize = pkey_keytype_to_size(t->keytype); + if (!keysize) { + PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n", + __func__, t->keytype); + goto out; + } + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + if (t->len != keysize + AES_WK_VP_SIZE || + keylen < sizeof(struct protaeskeytoken)) + goto out; + rc = pckmo_verify_protkey(t->protkey, t->len, + t->keytype); + if (rc) + goto out; + break; + default: + if (t->len != keysize + AES_WK_VP_SIZE || + keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE) + goto out; + break; + } + memcpy(protkey, t->protkey, t->len); + *protkeylen = t->len; + *protkeytype = t->keytype; + rc = 0; + break; + } + case TOKVER_CLEAR_KEY: { + struct clearkeytoken *t = (struct clearkeytoken *)key; + u32 keysize; + + if (keylen < sizeof(*t) || + keylen < sizeof(*t) + t->len) + goto out; + keysize = pkey_keytype_to_size(t->keytype); + if (!keysize) { + PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n", + __func__, t->keytype); + goto out; + } + if (t->len != keysize) { + PKEY_DBF_ERR("%s clear key token: invalid key len %u\n", + __func__, t->len); + goto out; + } + rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len, + protkey, protkeylen, protkeytype); + break; + } + default: + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", + __func__, hdr->version); + break; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate a random protected key. 
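+ *
+ * A random clear key is converted into a protected key only to obtain
+ * a blob with a valid WK VP; the wrapped key part is then overwritten
+ * with fresh random bytes, so the effective key value is never present
+ * in clear form anywhere.
+ *
+ * A minimal usage sketch (buffer sized for the largest key handled
+ * here, HMAC-1024 with 128 bytes of key material; sizes are an
+ * assumption for illustration):
+ *
+ *	u8 buf[128 + AES_WK_VP_SIZE];
+ *	u32 len = sizeof(buf), type;
+ *	int rc;
+ *
+ *	rc = pckmo_gen_protkey(PKEY_KEYTYPE_AES_256, PKEY_TYPE_PROTKEY,
+ *			       buf, &len, &type);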
+ */ +static int pckmo_gen_protkey(u32 keytype, u32 subtype, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + u8 clrkey[128]; + int keysize; + int rc; + + keysize = pkey_keytype_to_size(keytype); + if (!keysize) { + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + if (subtype != PKEY_TYPE_PROTKEY) { + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + case PKEY_KEYTYPE_AES_XTS_128: + case PKEY_KEYTYPE_AES_XTS_256: + case PKEY_KEYTYPE_HMAC_512: + case PKEY_KEYTYPE_HMAC_1024: + break; + default: + PKEY_DBF_ERR("%s unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + /* generate a dummy random clear key */ + get_random_bytes(clrkey, keysize); + + /* convert it to a dummy protected key */ + rc = pckmo_clr2protkey(keytype, clrkey, keysize, + protkey, protkeylen, protkeytype); + if (rc) + goto out; + + /* replace the key part of the protected key with random bytes */ + get_random_bytes(protkey, keysize); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Verify a protected key token blob. + */ +static int pckmo_verify_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc = -EINVAL; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + if (hdr->type != TOKTYPE_NON_CCA) + return -EINVAL; + + switch (hdr->version) { + case TOKVER_PROTECTED_KEY: { + struct protkeytoken *t = (struct protkeytoken *)key; + u32 keysize; + + if (keylen < sizeof(*t)) + goto out; + keysize = pkey_keytype_to_size(t->keytype); + if (!keysize || t->len != keysize + AES_WK_VP_SIZE) + goto out; + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + if (keylen < sizeof(struct protaeskeytoken)) + goto out; + break; + default: + if (keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE) + goto out; + break; + } + rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype); + break; + } + default: + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", + __func__, hdr->version); + break; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Wrapper functions used for the pkey handler struct + */ + +static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, + size_t _nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) +{ + return pckmo_key2protkey(key, keylen, + protkey, protkeylen, keyinfo); +} + +static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, + u32 keytype, u32 keysubtype, + u32 _keybitsize, u32 _flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 _xflags __always_unused) +{ + return pckmo_gen_protkey(keytype, keysubtype, + keybuf, keybuflen, keyinfo); +} + +static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, + u16 *_card, u16 *_dom, + u32 *_keytype, u32 *_keybitsize, + u32 *_flags, u32 _xflags __always_unused) +{ + return pckmo_verify_key(key, keylen); +} + +static struct pkey_handler pckmo_handler = { + .module = THIS_MODULE, + .name = "PKEY PCKMO handler", + .is_supported_key = is_pckmo_key, + .is_supported_keytype = is_pckmo_keytype, + .key_to_protkey = pkey_pckmo_key2protkey, + .gen_key = pkey_pckmo_gen_key, + .verify_key = pkey_pckmo_verifykey, +}; + +/* + * Module init + */ +static int __init pkey_pckmo_init(void) +{ + cpacf_mask_t func_mask; + + /* + * The pckmo 
instruction should be available - even if we don't + * actually invoke it. This instruction comes with MSA 3 which + * is also the minimum level for the kmc instructions which + * are able to work with protected keys. + */ + if (!cpacf_query(CPACF_PCKMO, &func_mask)) + return -ENODEV; + + /* register this module as pkey handler for all the pckmo stuff */ + return pkey_handler_register(&pckmo_handler); +} + +/* + * Module exit + */ +static void __exit pkey_pckmo_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&pckmo_handler); +} + +module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init); +module_exit(pkey_pckmo_exit); diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c new file mode 100644 index 000000000000..b6b0a46cb8a8 --- /dev/null +++ b/drivers/s390/crypto/pkey_sysfs.c @@ -0,0 +1,646 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey module sysfs related functions + * + * Copyright IBM Corp. 2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/sysfs.h> + +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" + +#include "pkey_base.h" + +/* + * Wrapper around pkey_handler_gen_key() which deals with the + * ENODEV return code and then tries to enforce a pkey handler + * module load. + */ +static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) +{ + int rc; + + rc = pkey_handler_gen_key(NULL, 0, + keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo, 0); + if (rc == -ENODEV) { + pkey_handler_request_modules(); + rc = pkey_handler_gen_key(NULL, 0, + keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo, 0); + } + + return rc; +} + +/* + * Sysfs attribute read function for all protected key binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, + loff_t off, size_t count) +{ + struct protaeskeytoken protkeytoken; + struct pkey_protkey protkey; + int rc; + + if (off != 0 || count < sizeof(protkeytoken)) + return -EINVAL; + if (is_xts) + if (count < 2 * sizeof(protkeytoken)) + return -EINVAL; + + memset(&protkeytoken, 0, sizeof(protkeytoken)); + protkeytoken.type = TOKTYPE_NON_CCA; + protkeytoken.version = TOKVER_PROTECTED_KEY; + protkeytoken.keytype = keytype; + + protkey.len = sizeof(protkey.protkey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + protkey.protkey, &protkey.len, + &protkey.type); + if (rc) + return rc; + + protkeytoken.len = protkey.len; + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); + + memcpy(buf, &protkeytoken, sizeof(protkeytoken)); + + if (is_xts) { + /* xts needs a second protected key, reuse protkey struct */ + protkey.len = sizeof(protkey.protkey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + protkey.protkey, &protkey.len, + &protkey.type); + if (rc) + return rc; + + protkeytoken.len = protkey.len; + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); + + memcpy(buf + sizeof(protkeytoken), &protkeytoken, + sizeof(protkeytoken)); + + return 2 * sizeof(protkeytoken); + } + + return sizeof(protkeytoken); +} + +/* + * Sysfs attribute read function for the AES XTS prot key binary attributes. 
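+ * Unlike the plain AES attributes above, which return fixed-size
+ * struct protaeskeytoken blobs, these attributes return a struct
+ * protkeytoken header directly followed by t->len bytes of protected
+ * key material.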
+ * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf, + loff_t off, size_t count) +{ + struct protkeytoken *t = (struct protkeytoken *)buf; + u32 protlen, prottype; + int rc; + + switch (keytype) { + case PKEY_KEYTYPE_AES_XTS_128: + protlen = 64; + break; + case PKEY_KEYTYPE_AES_XTS_256: + protlen = 96; + break; + default: + return -EINVAL; + } + + if (off != 0 || count < sizeof(*t) + protlen) + return -EINVAL; + + memset(t, 0, sizeof(*t) + protlen); + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_PROTECTED_KEY; + t->keytype = keytype; + + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + t->protkey, &protlen, &prottype); + if (rc) + return rc; + + t->len = protlen; + + return sizeof(*t) + protlen; +} + +/* + * Sysfs attribute read function for the HMAC prot key binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf, + loff_t off, size_t count) +{ + struct protkeytoken *t = (struct protkeytoken *)buf; + u32 protlen, prottype; + int rc; + + switch (keytype) { + case PKEY_KEYTYPE_HMAC_512: + protlen = 96; + break; + case PKEY_KEYTYPE_HMAC_1024: + protlen = 160; + break; + default: + return -EINVAL; + } + + if (off != 0 || count < sizeof(*t) + protlen) + return -EINVAL; + + memset(t, 0, sizeof(*t) + protlen); + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_PROTECTED_KEY; + t->keytype = keytype; + + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + t->protkey, &protlen, &prottype); + if (rc) + return rc; + + t->len = protlen; + + return sizeof(*t) + protlen; +} + +static ssize_t protkey_aes_128_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, + off, count); +} + +static ssize_t protkey_aes_192_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, + off, count); +} + +static ssize_t protkey_aes_256_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, + off, count); +} + +static ssize_t protkey_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, + off, count); +} + +static ssize_t protkey_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, + off, count); +} + +static ssize_t protkey_aes_xts_128_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128, + buf, off, count); +} + +static ssize_t 
protkey_aes_xts_256_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256, + buf, off, count); +} + +static ssize_t protkey_hmac_512_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512, + buf, off, count); +} + +static ssize_t protkey_hmac_1024_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024, + buf, off, count); +} + +static const BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); +static const BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); +static const BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); +static const BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); +static const BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken)); +static const BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64); +static const BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96); +static const BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96); +static const BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160); + +static const struct bin_attribute *const protkey_attrs[] = { + &bin_attr_protkey_aes_128, + &bin_attr_protkey_aes_192, + &bin_attr_protkey_aes_256, + &bin_attr_protkey_aes_128_xts, + &bin_attr_protkey_aes_256_xts, + &bin_attr_protkey_aes_xts_128, + &bin_attr_protkey_aes_xts_256, + &bin_attr_protkey_hmac_512, + &bin_attr_protkey_hmac_1024, + NULL +}; + +static const struct attribute_group protkey_attr_group = { + .name = "protkey", + .bin_attrs = protkey_attrs, +}; + +/* + * Sysfs attribute read function for all secure key ccadata binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
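+ *
+ * Example read from userspace (the sysfs path shown is an assumption;
+ * it depends on where the pkey device registers these attribute
+ * groups):
+ *
+ *	dd if=/sys/devices/virtual/misc/pkey/ccadata/ccadata_aes_256 \
+ *	   of=seckey.bin bs=4k count=1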
+ */ +static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, + loff_t off, size_t count) +{ + struct pkey_seckey *seckey = (struct pkey_seckey *)buf; + u32 buflen; + int rc; + + if (off != 0 || count < sizeof(struct secaeskeytoken)) + return -EINVAL; + if (is_xts) + if (count < 2 * sizeof(struct secaeskeytoken)) + return -EINVAL; + + buflen = sizeof(seckey->seckey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0, + seckey->seckey, &buflen, NULL); + if (rc) + return rc; + + if (is_xts) { + seckey++; + buflen = sizeof(seckey->seckey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0, + seckey->seckey, &buflen, NULL); + if (rc) + return rc; + + return 2 * sizeof(struct secaeskeytoken); + } + + return sizeof(struct secaeskeytoken); +} + +static ssize_t ccadata_aes_128_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, + off, count); +} + +static ssize_t ccadata_aes_192_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, + off, count); +} + +static ssize_t ccadata_aes_256_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, + off, count); +} + +static ssize_t ccadata_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, + off, count); +} + +static ssize_t ccadata_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, + off, count); +} + +static const BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); +static const BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); +static const BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); +static const BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); +static const BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); + +static const struct bin_attribute *const ccadata_attrs[] = { + &bin_attr_ccadata_aes_128, + &bin_attr_ccadata_aes_192, + &bin_attr_ccadata_aes_256, + &bin_attr_ccadata_aes_128_xts, + &bin_attr_ccadata_aes_256_xts, + NULL +}; + +static const struct attribute_group ccadata_attr_group = { + .name = "ccadata", + .bin_attrs = ccadata_attrs, +}; + +#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) + +/* + * Sysfs attribute read function for all secure key ccacipher binary attributes. + * The implementation can not deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + u32 keysize = CCACIPHERTOKENSIZE; + int rc; + + if (off != 0 || count < CCACIPHERTOKENSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * CCACIPHERTOKENSIZE) + return -EINVAL; + + memset(buf, 0, is_xts ? 
2 * keysize : keysize); + + rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_CCA_CIPHER, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + + if (is_xts) { + keysize = CCACIPHERTOKENSIZE; + buf += CCACIPHERTOKENSIZE; + rc = sys_pkey_handler_gen_key( + pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_CCA_CIPHER, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + return 2 * CCACIPHERTOKENSIZE; + } + + return CCACIPHERTOKENSIZE; +} + +static ssize_t ccacipher_aes_128_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_192_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static const BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); +static const BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); +static const BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); +static const BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); +static const BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); + +static const struct bin_attribute *const ccacipher_attrs[] = { + &bin_attr_ccacipher_aes_128, + &bin_attr_ccacipher_aes_192, + &bin_attr_ccacipher_aes_256, + &bin_attr_ccacipher_aes_128_xts, + &bin_attr_ccacipher_aes_256_xts, + NULL +}; + +static const struct attribute_group ccacipher_attr_group = { + .name = "ccacipher", + .bin_attrs = ccacipher_attrs, +}; + +/* + * Sysfs attribute read function for all ep11 aes key binary attributes. + * The implementation can not deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + * This function and the sysfs attributes using it provide EP11 key blobs + * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently + * 336 bytes. + */ +static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + u32 keysize = MAXEP11AESKEYBLOBSIZE; + int rc; + + if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + + memset(buf, 0, is_xts ? 
2 * keysize : keysize); + + rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_EP11_AES, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + + if (is_xts) { + keysize = MAXEP11AESKEYBLOBSIZE; + buf += MAXEP11AESKEYBLOBSIZE; + rc = sys_pkey_handler_gen_key( + pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_EP11_AES, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + return 2 * MAXEP11AESKEYBLOBSIZE; + } + + return MAXEP11AESKEYBLOBSIZE; +} + +static ssize_t ep11_aes_128_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ep11_aes_192_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ep11_aes_256_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ep11_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ep11_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static const BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); +static const BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); +static const BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); +static const BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); +static const BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); + +static const struct bin_attribute *const ep11_attrs[] = { + &bin_attr_ep11_aes_128, + &bin_attr_ep11_aes_192, + &bin_attr_ep11_aes_256, + &bin_attr_ep11_aes_128_xts, + &bin_attr_ep11_aes_256_xts, + NULL +}; + +static const struct attribute_group ep11_attr_group = { + .name = "ep11", + .bin_attrs = ep11_attrs, +}; + +const struct attribute_group *pkey_attr_groups[] = { + &protkey_attr_group, + &ccadata_attr_group, + &ccacipher_attr_group, + &ep11_attr_group, + NULL, +}; diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c new file mode 100644 index 000000000000..6cd3c49384b5 --- /dev/null +++ b/drivers/s390/crypto/pkey_uv.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey uv specific code + * + * Copyright IBM Corp. 2024 + */ + +#define pr_fmt(fmt) "pkey: " fmt + +#include <linux/cpufeature.h> +#include <linux/init.h> +#include <linux/module.h> +#include <asm/uv.h> + +#include "zcrypt_ccamisc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key UV handler"); + +/* + * One pre-allocated uv_secret_list for use with uv_find_secret() + */ +static struct uv_secret_list *uv_list; +static DEFINE_MUTEX(uv_list_mutex); + +/* + * UV secret token struct and defines. 
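+ *
+ * A userspace sketch for building such a token (field values are
+ * illustrative; the secret id comes from the UV secret list):
+ *
+ *	struct uvsecrettoken tok = {
+ *		.type = TOKTYPE_NON_CCA,
+ *		.version = TOKVER_UV_SECRET,
+ *		.secret_type = UV_SECRET_AES_256,
+ *		.secret_len = 32,
+ *	};
+ *	memcpy(tok.secret_id, id, UV_SECRET_ID_LEN);
+ *
+ * The token only references the secret by its id; uv_key2protkey()
+ * below retrieves the corresponding protected key from the Ultravisor.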
+ */ + +#define TOKVER_UV_SECRET 0x09 + +struct uvsecrettoken { + u8 type; /* 0x00 = TOKTYPE_NON_CCA */ + u8 res0[3]; + u8 version; /* 0x09 = TOKVER_UV_SECRET */ + u8 res1[3]; + u16 secret_type; /* one of enum uv_secret_types from uv.h */ + u16 secret_len; /* length in bytes of the secret */ + u8 secret_id[UV_SECRET_ID_LEN]; /* the secret id for this secret */ +} __packed; + +/* + * Check key blob for known and supported UV key. + */ +static bool is_uv_key(const u8 *key, u32 keylen) +{ + struct uvsecrettoken *t = (struct uvsecrettoken *)key; + + if (keylen < sizeof(*t)) + return false; + + switch (t->type) { + case TOKTYPE_NON_CCA: + switch (t->version) { + case TOKVER_UV_SECRET: + switch (t->secret_type) { + case UV_SECRET_AES_128: + case UV_SECRET_AES_192: + case UV_SECRET_AES_256: + case UV_SECRET_AES_XTS_128: + case UV_SECRET_AES_XTS_256: + case UV_SECRET_HMAC_SHA_256: + case UV_SECRET_HMAC_SHA_512: + case UV_SECRET_ECDSA_P256: + case UV_SECRET_ECDSA_P384: + case UV_SECRET_ECDSA_P521: + case UV_SECRET_ECDSA_ED25519: + case UV_SECRET_ECDSA_ED448: + return true; + default: + return false; + } + default: + return false; + } + default: + return false; + } +} + +static bool is_uv_keytype(enum pkey_key_type keytype) +{ + switch (keytype) { + case PKEY_TYPE_UVSECRET: + return true; + default: + return false; + } +} + +static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list_item_hdr *secret) +{ + int rc; + + mutex_lock(&uv_list_mutex); + memset(uv_list, 0, sizeof(*uv_list)); + rc = uv_find_secret(secret_id, uv_list, secret); + mutex_unlock(&uv_list_mutex); + + return rc; +} + +static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN], + u16 *secret_type, u8 *buf, u32 *buflen) +{ + struct uv_secret_list_item_hdr secret_meta_data; + int rc; + + rc = get_secret_metadata(secret_id, &secret_meta_data); + if (rc) + return rc; + + if (*buflen < secret_meta_data.length) + return -EINVAL; + + rc = uv_retrieve_secret(secret_meta_data.index, + buf, secret_meta_data.length); + if (rc) + return rc; + + *secret_type = secret_meta_data.type; + *buflen = secret_meta_data.length; + + return 0; +} + +static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype) +{ + int rc = 0; + + switch (secret_type) { + case UV_SECRET_AES_128: + *pkeysize = 16 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_AES_128; + break; + case UV_SECRET_AES_192: + *pkeysize = 24 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_AES_192; + break; + case UV_SECRET_AES_256: + *pkeysize = 32 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_AES_256; + break; + case UV_SECRET_AES_XTS_128: + *pkeysize = 16 + 16 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_AES_XTS_128; + break; + case UV_SECRET_AES_XTS_256: + *pkeysize = 32 + 32 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_AES_XTS_256; + break; + case UV_SECRET_HMAC_SHA_256: + *pkeysize = 64 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_HMAC_512; + break; + case UV_SECRET_HMAC_SHA_512: + *pkeysize = 128 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_HMAC_1024; + break; + case UV_SECRET_ECDSA_P256: + *pkeysize = 32 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_ECC_P256; + break; + case UV_SECRET_ECDSA_P384: + *pkeysize = 48 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_ECC_P384; + break; + case UV_SECRET_ECDSA_P521: + *pkeysize = 80 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_ECC_P521; + break; + case UV_SECRET_ECDSA_ED25519: + *pkeysize = 32 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_ECC_ED25519; + break; + case UV_SECRET_ECDSA_ED448: + *pkeysize = 
64 + AES_WK_VP_SIZE; + *pkeytype = PKEY_KEYTYPE_ECC_ED448; + break; + default: + rc = -EINVAL; + } + + return rc; +} + +static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused, + size_t _nr_apqns __always_unused, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) +{ + struct uvsecrettoken *t = (struct uvsecrettoken *)key; + u32 pkeysize, pkeytype; + u16 secret_type; + int rc; + + rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype); + if (rc) + goto out; + + if (*protkeylen < pkeysize) { + PKEY_DBF_ERR("%s prot key buffer size too small: %u < %u\n", + __func__, *protkeylen, pkeysize); + rc = -EINVAL; + goto out; + } + + rc = retrieve_secret(t->secret_id, &secret_type, protkey, protkeylen); + if (rc) { + PKEY_DBF_ERR("%s retrieve_secret() failed with %d\n", + __func__, rc); + goto out; + } + if (secret_type != t->secret_type) { + PKEY_DBF_ERR("%s retrieved secret type %u != expected type %u\n", + __func__, secret_type, t->secret_type); + rc = -EINVAL; + goto out; + } + + if (keyinfo) + *keyinfo = pkeytype; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int uv_verifykey(const u8 *key, u32 keylen, + u16 *_card __always_unused, + u16 *_dom __always_unused, + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags __always_unused) +{ + struct uvsecrettoken *t = (struct uvsecrettoken *)key; + struct uv_secret_list_item_hdr secret_meta_data; + u32 pkeysize, pkeytype, bitsize; + int rc; + + rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype); + if (rc) + goto out; + + rc = get_secret_metadata(t->secret_id, &secret_meta_data); + if (rc) + goto out; + + if (secret_meta_data.type != t->secret_type) { + rc = -EINVAL; + goto out; + } + + /* set keytype; keybitsize and flags are not supported */ + if (keytype) + *keytype = PKEY_TYPE_UVSECRET; + if (keybitsize) { + bitsize = 8 * pkey_keytype_to_size(pkeytype); + *keybitsize = bitsize ?: PKEY_SIZE_UNKNOWN; + } + if (flags) + *flags = pkeytype; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static struct pkey_handler uv_handler = { + .module = THIS_MODULE, + .name = "PKEY UV handler", + .is_supported_key = is_uv_key, + .is_supported_keytype = is_uv_keytype, + .key_to_protkey = uv_key2protkey, + .verify_key = uv_verifykey, +}; + +/* + * Module init + */ +static int __init pkey_uv_init(void) +{ + int rc; + + if (!is_prot_virt_guest()) + return -ENODEV; + + if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list)) + return -ENODEV; + + uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL); + if (!uv_list) + return -ENOMEM; + + rc = pkey_handler_register(&uv_handler); + if (rc) + kfree(uv_list); + + return rc; +} + +/* + * Module exit + */ +static void __exit pkey_uv_exit(void) +{ + pkey_handler_unregister(&uv_handler); + mutex_lock(&uv_list_mutex); + kvfree(uv_list); + mutex_unlock(&uv_list_mutex); +} + +module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init); +module_exit(pkey_uv_exit); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index a5ab03e42ff1..67a807e2e75b 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -26,6 +26,18 @@ MODULE_LICENSE("GPL v2"); struct ap_matrix_dev *matrix_dev; debug_info_t *vfio_ap_dbf_info; +static ssize_t features_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "guest_matrix hotplug ap_config\n"); +} +static DEVICE_ATTR_RO(features); + +static struct attribute *matrix_dev_attrs[] = { 
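+	/* "features" attribute: lets userspace probe vfio_ap capabilities */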
+ &dev_attr_features.attr, + NULL, +}; +ATTRIBUTE_GROUPS(matrix_dev); + /* Only type 10 adapters (CEX4 and later) are supported * by the AP matrix device driver */ @@ -60,7 +72,7 @@ static void vfio_ap_matrix_dev_release(struct device *dev) kfree(matrix_dev); } -static struct bus_type matrix_bus = { +static const struct bus_type matrix_bus = { .name = "matrix", }; @@ -68,6 +80,7 @@ static struct device_driver matrix_driver = { .name = "vfio_ap", .bus = &matrix_bus, .suppress_bind_attrs = true, + .dev_groups = matrix_dev_groups, }; static int vfio_ap_matrix_dev_create(void) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 0509f80622cd..48da32ad0493 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -32,7 +32,8 @@ #define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */ -static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable); +static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev); +static int vfio_ap_mdev_reset_qlist(struct list_head *qlist); static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static const struct vfio_device_ops vfio_ap_matrix_dev_ops; static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q); @@ -353,16 +354,32 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib) if (!*nib) return -EINVAL; - if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT))) + if (!kvm_s390_is_gpa_in_memslot(vcpu->kvm, *nib)) return -EINVAL; return 0; } -static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) +/** + * ensure_nib_shared() - Ensure the address of the NIB is secure and shared + * @addr: the physical (absolute) address of the NIB + * + * This function checks whether the NIB page, which has been pinned with + * vfio_pin_pages(), is a shared page belonging to a secure guest. + * + * It will call uv_pin_shared() on it; if the page was already pinned shared + * (i.e. if the NIB belongs to a secure guest and is shared), then 0 + * (success) is returned. If the NIB was not shared, vfio_pin_pages() had + * exported it and now it does not belong to the secure guest anymore. In + * that case, an error is returned. + * + * Context: the NIB (at physical address @addr) has to be pinned with + * vfio_pin_pages() before calling this function. + * + * Return: 0 in case of success, otherwise an error < 0. + */ +static int ensure_nib_shared(unsigned long addr) { - int ret; - /* * The nib has to be located in shared storage since guest and * host access it. vfio_pin_pages() will do a pin shared and @@ -373,12 +390,7 @@ static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) * * If the page is already pinned shared the UV will return a success. */ - ret = uv_pin_shared(addr); - if (ret) { - /* vfio_pin_pages() likely exported the page so let's re-import */ - gmap_convert_to_secure(gmap, addr); - } - return ret; + return uv_pin_shared(addr); } /** @@ -393,8 +405,8 @@ static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) * Register the guest ISC to GIB interface and retrieve the * host ISC to issue the host side PQAP/AQIC * - * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case the - * vfio_pin_pages failed. + * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the + * vfio_pin_pages or kvm_s390_gisc_register failed. * * Otherwise return the ap_queue_status returned by the ap_aqic(), * all retry handling will be done by the guest. 
@@ -424,6 +436,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, return status; } + /* The pin will probably be successful even if the NIB was not shared */ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1, IOMMU_READ | IOMMU_WRITE, &h_page); switch (ret) { @@ -446,7 +459,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, /* NIB in non-shared storage is a rc 6 for PV guests */ if (kvm_s390_pv_cpu_is_protected(vcpu) && - ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) { + ensure_nib_shared(h_nib & PAGE_MASK)) { vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); status.response_code = AP_RESPONSE_INVALID_ADDRESS; return status; @@ -457,7 +470,8 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n", __func__, nisc, isc, q->apqn); - status.response_code = AP_RESPONSE_INVALID_GISA; + vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); + status.response_code = AP_RESPONSE_INVALID_ADDRESS; return status; } @@ -475,8 +489,11 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, break; case AP_RESPONSE_OTHERWISE_CHANGED: /* We could not modify IRQ settings: clear new configuration */ + ret = kvm_s390_gisc_unregister(kvm, isc); + if (ret) + VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n", + __func__, ret, isc, q->apqn); vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); - kvm_s390_gisc_unregister(kvm, isc); break; default: pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn, @@ -633,13 +650,22 @@ static void vfio_ap_matrix_init(struct ap_config_info *info, matrix->adm_max = info->apxa ? info->nd : 15; } +static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev) +{ + if (matrix_mdev->cfg_chg_trigger) + eventfd_signal(matrix_mdev->cfg_chg_trigger); +} + static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) { - if (matrix_mdev->kvm) + if (matrix_mdev->kvm) { kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->shadow_apcb.apm, matrix_mdev->shadow_apcb.aqm, matrix_mdev->shadow_apcb.adm); + + signal_guest_ap_cfg_changed(matrix_mdev); + } } static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) @@ -654,6 +680,21 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) AP_DOMAINS); } +static bool _queue_passable(struct vfio_ap_queue *q) +{ + if (!q) + return false; + + switch (q->reset_status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + return true; + default: + return false; + } +} + /* * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev * to ensure no queue devices are passed through to @@ -661,26 +702,32 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) * device driver. * * @matrix_mdev: the matrix mdev whose matrix is to be filtered. + * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the + * guest's AP configuration that are still in the host's AP + * configuration. * * Note: If an APQN referencing a queue device that is not bound to the vfio_ap * driver, its APID will be filtered from the guest's APCB. The matrix * structure precludes filtering an individual APQN, so its APID will be - * filtered. + * filtered. Consequently, all queues associated with the adapter that + * are in the host's AP configuration must be reset. 
If queues are + * subsequently made available again to the guest, they should re-appear + * in a reset state * * Return: a boolean value indicating whether the KVM guest's APCB was changed * by the filtering or not. */ -static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, - struct ap_matrix_mdev *matrix_mdev) +static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apm_filtered) { unsigned long apid, apqi, apqn; DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES); DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS); - struct vfio_ap_queue *q; bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); + bitmap_clear(apm_filtered, 0, AP_DEVICES); /* * Copy the adapters, domains and control domains to the shadow_apcb @@ -692,8 +739,9 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm, (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS); - for_each_set_bit_inv(apid, apm, AP_DEVICES) { - for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, + AP_DOMAINS) { /* * If the APQN is not bound to the vfio_ap device * driver, then we can't assign it to the guest's @@ -703,10 +751,17 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, * hardware device. */ apqn = AP_MKQID(apid, apqi); - q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); - if (!q || q->reset_status.response_code) { - clear_bit_inv(apid, - matrix_mdev->shadow_apcb.apm); + if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) { + clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); + + /* + * If the adapter was previously plugged into + * the guest, let's let the caller know that + * the APID was filtered. 
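+				 * The caller is expected to reset all queues
+				 * of the filtered adapter (see
+				 * reset_queues_for_apids()) so they re-appear
+				 * in a clean state if they are later made
+				 * available to the guest again.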
+ */ + if (test_bit_inv(apid, prev_shadow_apm)) + set_bit_inv(apid, apm_filtered); + break; } } @@ -746,6 +801,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) if (ret) goto err_put_vdev; matrix_mdev->req_trigger = NULL; + matrix_mdev->cfg_chg_trigger = NULL; dev_set_drvdata(&mdev->dev, matrix_mdev); mutex_lock(&matrix_dev->mdevs_lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); @@ -760,10 +816,11 @@ err_put_vdev: static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev, struct vfio_ap_queue *q) { - if (q) { - q->matrix_mdev = matrix_mdev; - hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); - } + if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn)) + return; + + q->matrix_mdev = matrix_mdev; + hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); } static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn) @@ -808,7 +865,7 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) mutex_lock(&matrix_dev->guests_lock); mutex_lock(&matrix_dev->mdevs_lock); - vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); + vfio_ap_mdev_reset_queues(matrix_mdev); vfio_ap_mdev_unlink_fr_queues(matrix_mdev); list_del(&matrix_mdev->node); mutex_unlock(&matrix_dev->mdevs_lock); @@ -816,48 +873,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) vfio_put_device(&matrix_mdev->vdev); } -#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ - "already assigned to %s" +#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s" + +#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev" + +static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee, + struct ap_matrix_mdev *assigned_to, + unsigned long *apm, unsigned long *aqm) +{ + unsigned long apid, apqi; + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR, + apid, apqi, dev_name(mdev_dev(assigned_to->mdev))); + } + } +} -static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, - unsigned long *apm, - unsigned long *aqm) +static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee, + unsigned long *apm, unsigned long *aqm) { unsigned long apid, apqi; - const struct device *dev = mdev_dev(matrix_mdev->mdev); - const char *mdev_name = dev_name(dev); - for_each_set_bit_inv(apid, apm, AP_DEVICES) + for_each_set_bit_inv(apid, apm, AP_DEVICES) { for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) - dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); + dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi); + } } /** * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs * + * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being + * assigned; or, NULL if this function was called by the AP bus + * driver in_use callback to verify none of the APQNs being reserved + * for the host device driver are in use by a vfio_ap mediated device * @mdev_apm: mask indicating the APIDs of the APQNs to be verified * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified * - * Verifies that each APQN derived from the Cartesian product of a bitmap of - * AP adapter IDs and AP queue indexes is not configured for any matrix - * mediated device. AP queue sharing is not allowed. 
+ * Verifies that each APQN derived from the Cartesian product of APIDs + * represented by the bits set in @mdev_apm and the APQIs of the bits set in + * @mdev_aqm is not assigned to a mediated device other than the mdev to which + * the APQN is being assigned (@assignee). AP queue sharing is not allowed. * * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. */ -static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, +static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee, + unsigned long *mdev_apm, unsigned long *mdev_aqm) { - struct ap_matrix_mdev *matrix_mdev; + struct ap_matrix_mdev *assigned_to; DECLARE_BITMAP(apm, AP_DEVICES); DECLARE_BITMAP(aqm, AP_DOMAINS); - list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) { /* - * If the input apm and aqm are fields of the matrix_mdev - * object, then move on to the next matrix_mdev. + * If the mdev to which the mdev_apm and mdev_aqm is being + * assigned is the same as the mdev being verified */ - if (mdev_apm == matrix_mdev->matrix.apm && - mdev_aqm == matrix_mdev->matrix.aqm) + if (assignee == assigned_to) continue; memset(apm, 0, sizeof(apm)); @@ -867,15 +942,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, * We work on full longs, as we can only exclude the leftover * bits in non-inverse order. The leftover is all zeros. */ - if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, - AP_DEVICES)) + if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES)) continue; - if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, - AP_DOMAINS)) + if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS)) continue; - vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); + if (assignee) + vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm); + else + vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm); return -EADDRINUSE; } @@ -892,7 +968,7 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, * * Return: One of the following values: * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function, - * most likely -EBUSY indicating the ap_perms_mutex lock is already held. + * most likely -EBUSY indicating the ap_attr_mutex lock is already held. * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the * zcrypt default driver. 
* o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev @@ -904,7 +980,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) matrix_mdev->matrix.aqm)) return -EADDRNOTAVAIL; - return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, + return vfio_ap_mdev_verify_no_sharing(matrix_mdev, + matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm); } @@ -918,6 +995,47 @@ static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev, AP_MKQID(apid, apqi)); } +static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid, + struct list_head *qlist) +{ + struct vfio_ap_queue *q; + unsigned long apqi; + + for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) { + q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); + if (q) + list_add_tail(&q->reset_qnode, qlist); + } +} + +static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + struct list_head qlist; + + INIT_LIST_HEAD(&qlist); + collect_queues_to_reset(matrix_mdev, apid, &qlist); + vfio_ap_mdev_reset_qlist(&qlist); +} + +static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apm_reset) +{ + struct list_head qlist; + unsigned long apid; + + if (bitmap_empty(apm_reset, AP_DEVICES)) + return 0; + + INIT_LIST_HEAD(&qlist); + + for_each_set_bit_inv(apid, apm_reset, AP_DEVICES) + collect_queues_to_reset(matrix_mdev, apid, &qlist); + + return vfio_ap_mdev_reset_qlist(&qlist); +} + /** * assign_adapter_store - parses the APID from @buf and sets the * corresponding bit in the mediated matrix device's APM @@ -958,10 +1076,10 @@ static ssize_t assign_adapter_store(struct device *dev, { int ret; unsigned long apid; - DECLARE_BITMAP(apm_delta, AP_DEVICES); + DECLARE_BITMAP(apm_filtered, AP_DEVICES); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&ap_perms_mutex); + mutex_lock(&ap_attr_mutex); get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apid); @@ -987,17 +1105,16 @@ static ssize_t assign_adapter_store(struct device *dev, } vfio_ap_mdev_link_adapter(matrix_mdev, apid); - memset(apm_delta, 0, sizeof(apm_delta)); - set_bit_inv(apid, apm_delta); - if (vfio_ap_mdev_filter_matrix(apm_delta, - matrix_mdev->matrix.aqm, matrix_mdev)) + if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apids(matrix_mdev, apm_filtered); + } ret = count; done: release_update_locks_for_mdev(matrix_mdev); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return ret; } @@ -1023,11 +1140,12 @@ static struct vfio_ap_queue * adapter was assigned. * @matrix_mdev: the matrix mediated device to which the adapter was assigned. * @apid: the APID of the unassigned adapter. - * @qtable: table for storing queues associated with unassigned adapter. + * @qlist: list for storing queues associated with unassigned adapter that + * need to be reset. 
*/ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, unsigned long apid, - struct ap_queue_table *qtable) + struct list_head *qlist) { unsigned long apqi; struct vfio_ap_queue *q; @@ -1035,38 +1153,53 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) { q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); - if (q && qtable) { + if (q && qlist) { if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) - hash_add(qtable->queues, &q->mdev_qnode, - q->apqn); + list_add_tail(&q->reset_qnode, qlist); } } } -static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, - unsigned long apid) +static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apids) { - int loop_cursor; - struct vfio_ap_queue *q; - struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + struct vfio_ap_queue *q, *tmpq; + struct list_head qlist; + unsigned long apid; + bool apcb_update = false; - hash_init(qtable->queues); - vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable); + INIT_LIST_HEAD(&qlist); - if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { - clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); - vfio_ap_mdev_update_guest_apcb(matrix_mdev); + for_each_set_bit_inv(apid, apids, AP_DEVICES) { + vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist); + + if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { + clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); + apcb_update = true; + } } - vfio_ap_mdev_reset_queues(qtable); + /* Only update apcb if needed to avoid impacting guest */ + if (apcb_update) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + + vfio_ap_mdev_reset_qlist(&qlist); - hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) { vfio_ap_unlink_mdev_fr_queue(q); - hash_del(&q->mdev_qnode); + list_del(&q->reset_qnode); } +} + +static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + DECLARE_BITMAP(apids, AP_DEVICES); - kfree(qtable); + bitmap_zero(apids, AP_DEVICES); + set_bit_inv(apid, apids); + vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids); } /** @@ -1167,10 +1300,10 @@ static ssize_t assign_domain_store(struct device *dev, { int ret; unsigned long apqi; - DECLARE_BITMAP(aqm_delta, AP_DOMAINS); + DECLARE_BITMAP(apm_filtered, AP_DEVICES); struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); - mutex_lock(&ap_perms_mutex); + mutex_lock(&ap_attr_mutex); get_update_locks_for_mdev(matrix_mdev); ret = kstrtoul(buf, 0, &apqi); @@ -1196,17 +1329,16 @@ static ssize_t assign_domain_store(struct device *dev, } vfio_ap_mdev_link_domain(matrix_mdev, apqi); - memset(aqm_delta, 0, sizeof(aqm_delta)); - set_bit_inv(apqi, aqm_delta); - if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta, - matrix_mdev)) + if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apids(matrix_mdev, apm_filtered); + } ret = count; done: release_update_locks_for_mdev(matrix_mdev); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return ret; } @@ -1214,7 +1346,7 @@ static DEVICE_ATTR_WO(assign_domain); static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, unsigned long apqi, - struct ap_queue_table *qtable) + struct list_head *qlist) { unsigned long 
apid; struct vfio_ap_queue *q; @@ -1222,38 +1354,53 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); - if (q && qtable) { + if (q && qlist) { if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) - hash_add(qtable->queues, &q->mdev_qnode, - q->apqn); + list_add_tail(&q->reset_qnode, qlist); } } } -static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, - unsigned long apqi) +static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev, + unsigned long *apqis) { - int loop_cursor; - struct vfio_ap_queue *q; - struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL); + struct vfio_ap_queue *q, *tmpq; + struct list_head qlist; + unsigned long apqi; + bool apcb_update = false; - hash_init(qtable->queues); - vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable); + INIT_LIST_HEAD(&qlist); - if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { - clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); - vfio_ap_mdev_update_guest_apcb(matrix_mdev); + for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) { + vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist); + + if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { + clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); + apcb_update = true; + } } - vfio_ap_mdev_reset_queues(qtable); + /* Only update apcb if needed to avoid impacting guest */ + if (apcb_update) + vfio_ap_mdev_update_guest_apcb(matrix_mdev); - hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + vfio_ap_mdev_reset_qlist(&qlist); + + list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) { vfio_ap_unlink_mdev_fr_queue(q); - hash_del(&q->mdev_qnode); + list_del(&q->reset_qnode); } +} + +static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) +{ + DECLARE_BITMAP(apqis, AP_DOMAINS); - kfree(qtable); + bitmap_zero(apqis, AP_DEVICES); + set_bit_inv(apqi, apqis); + vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis); } /** @@ -1416,18 +1563,13 @@ static ssize_t control_domains_show(struct device *dev, char *buf) { unsigned long id; - int nchars = 0; - int n; - char *bufpos = buf; struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); unsigned long max_domid = matrix_mdev->matrix.adm_max; + int nchars = 0; mutex_lock(&matrix_dev->mdevs_lock); - for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { - n = sprintf(bufpos, "%04lx\n", id); - bufpos += n; - nchars += n; - } + for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) + nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id); mutex_unlock(&matrix_dev->mdevs_lock); return nchars; @@ -1436,7 +1578,6 @@ static DEVICE_ATTR_RO(control_domains); static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf) { - char *bufpos = buf; unsigned long apid; unsigned long apqi; unsigned long apid1; @@ -1444,33 +1585,21 @@ static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf) unsigned long napm_bits = matrix->apm_max + 1; unsigned long naqm_bits = matrix->aqm_max + 1; int nchars = 0; - int n; apid1 = find_first_bit_inv(matrix->apm, napm_bits); apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits); if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) { for_each_set_bit_inv(apid, matrix->apm, napm_bits) { - for_each_set_bit_inv(apqi, matrix->aqm, - naqm_bits) { - n = sprintf(bufpos, "%02lx.%04lx\n", 
apid, - apqi); - bufpos += n; - nchars += n; - } + for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) + nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi); } } else if (apid1 < napm_bits) { - for_each_set_bit_inv(apid, matrix->apm, napm_bits) { - n = sprintf(bufpos, "%02lx.\n", apid); - bufpos += n; - nchars += n; - } + for_each_set_bit_inv(apid, matrix->apm, napm_bits) + nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid); } else if (apqi1 < naqm_bits) { - for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) { - n = sprintf(bufpos, ".%04lx\n", apqi); - bufpos += n; - nchars += n; - } + for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) + nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi); } return nchars; @@ -1504,6 +1633,158 @@ static ssize_t guest_matrix_show(struct device *dev, } static DEVICE_ATTR_RO(guest_matrix); +static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep) +{ + return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c", + bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep); +} + +static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); + int idx = 0; + + idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ','); + idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ','); + idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n'); + + return idx; +} + +/* Number of characters needed for a complete hex mask representing the bits in .. */ +#define AP_DEVICES_STRLEN (AP_DEVICES / 4 + 3) +#define AP_DOMAINS_STRLEN (AP_DOMAINS / 4 + 3) +#define AP_CONFIG_STRLEN (AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN) + +static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits) +{ + char *curmask; + + curmask = strsep(strbufptr, ",\n"); + if (!curmask) + return -EINVAL; + + bitmap_clear(bitmap, 0, nbits); + return ap_hex2bitmap(curmask, bitmap, nbits); +} + +static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev) +{ + unsigned long bit; + + for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) { + if (bit > matrix_mdev->matrix.apm_max) + return -ENODEV; + } + + for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) { + if (bit > matrix_mdev->matrix.aqm_max) + return -ENODEV; + } + + for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) { + if (bit > matrix_mdev->matrix.adm_max) + return -ENODEV; + } + + return 0; +} + +static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src) +{ + /* This check works around false positive gcc -Wstringop-overread */ + if (!src) + return; + + bitmap_copy(dst->apm, src->apm, AP_DEVICES); + bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS); + bitmap_copy(dst->adm, src->adm, AP_DOMAINS); +} + +static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); + struct ap_matrix m_new, m_old, m_added, m_removed; + DECLARE_BITMAP(apm_filtered, AP_DEVICES); + unsigned long newbit; + char *newbuf, *rest; + int rc = count; + bool do_update; + + newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL); + if (!newbuf) + return -ENOMEM; + rest = newbuf; + + mutex_lock(&ap_attr_mutex); + get_update_locks_for_mdev(matrix_mdev); + + /* Save old state */ + ap_matrix_copy(&m_old, &matrix_mdev->matrix); + if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) || + parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) || + parse_bitmap(&rest, 
m_new.adm, AP_DOMAINS)) { + rc = -EINVAL; + goto out; + } + + bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES); + bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS); + bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES); + bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS); + + /* Need new bitmaps in matrix_mdev for validation */ + ap_matrix_copy(&matrix_mdev->matrix, &m_new); + + /* Ensure new state is valid, else undo new state */ + rc = vfio_ap_mdev_validate_masks(matrix_mdev); + if (rc) { + ap_matrix_copy(&matrix_mdev->matrix, &m_old); + goto out; + } + rc = ap_matrix_overflow_check(matrix_mdev); + if (rc) { + ap_matrix_copy(&matrix_mdev->matrix, &m_old); + goto out; + } + rc = count; + + /* Need old bitmaps in matrix_mdev for unplug/unlink */ + ap_matrix_copy(&matrix_mdev->matrix, &m_old); + + /* Unlink removed adapters/domains */ + vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm); + vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm); + + /* Need new bitmaps in matrix_mdev for linking new adapters/domains */ + ap_matrix_copy(&matrix_mdev->matrix, &m_new); + + /* Link newly added adapters */ + for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES) + vfio_ap_mdev_link_adapter(matrix_mdev, newbit); + + for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS) + vfio_ap_mdev_link_domain(matrix_mdev, newbit); + + /* filter resources not bound to vfio-ap */ + do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); + do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev); + + /* Apply changes to shadow apcb if things changed */ + if (do_update) { + vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apids(matrix_mdev, apm_filtered); + } +out: + release_update_locks_for_mdev(matrix_mdev); + mutex_unlock(&ap_attr_mutex); + kfree(newbuf); + return rc; +} +static DEVICE_ATTR_RW(ap_config); +
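
The ap_config attribute defined above prints each mask as "0x" followed by AP_DEVICES / 4 = 64 hex digits, which is where AP_DEVICES_STRLEN comes from: 64 digits plus the "0x" prefix plus one separator byte, 67 in total. A userspace round-trip of that encoding; mask_to_str() and str_to_mask() are hypothetical helpers mirroring write_ap_bitmap() and parse_bitmap():

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WORDS 4			/* 256 bits, like the apm/aqm/adm masks */

static void mask_to_str(const uint64_t *m, char *s)
{
	sprintf(s, "0x%016" PRIx64 "%016" PRIx64 "%016" PRIx64 "%016" PRIx64,
		m[0], m[1], m[2], m[3]);
}

static int str_to_mask(const char *s, uint64_t *m)
{
	char word[17];
	int i;

	if (strncmp(s, "0x", 2) || strlen(s) < 2 + 16 * WORDS)
		return -1;
	for (i = 0; i < WORDS; i++) {
		memcpy(word, s + 2 + 16 * i, 16);
		word[16] = '\0';
		m[i] = strtoull(word, NULL, 16);
	}
	return 0;
}

int main(void)
{
	uint64_t in[WORDS] = { 1ULL << 63, 0, 0, 1 }, out[WORDS];
	char s[2 + 16 * WORDS + 1];

	mask_to_str(in, s);	/* adapter 0 (MSB-first) and bit 255 set */
	puts(s);
	return str_to_mask(s, out) || memcmp(in, out, sizeof(in));
}

Because the store path parses exactly this format, a string read back from ap_config can always be written to it again as a no-op update.

static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, &dev_attr_unassign_adapter.attr, @@ -1511,6 +1792,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_unassign_domain.attr, &dev_attr_assign_control_domain.attr, &dev_attr_unassign_control_domain.attr, + &dev_attr_ap_config.attr, &dev_attr_control_domains.attr, &dev_attr_matrix.attr, &dev_attr_guest_matrix.attr, @@ -1608,7 +1890,7 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) get_update_locks_for_kvm(kvm); kvm_arch_crypto_clear_masks(kvm); - vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); + vfio_ap_mdev_reset_queues(matrix_mdev); kvm_put_kvm(kvm); matrix_mdev->kvm = NULL; @@ -1638,6 +1920,7 @@ static int apq_status_check(int apqn, struct ap_queue_status *status) switch (status->response_code) { case AP_RESPONSE_NORMAL: case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: return 0; case AP_RESPONSE_RESET_IN_PROGRESS: case AP_RESPONSE_BUSY: @@ -1694,14 +1977,6 @@ static void apq_reset_check(struct work_struct *reset_work) memcpy(&q->reset_status, &status, sizeof(status)); continue; } - /* - * When an AP adapter is deconfigured, the - * associated queues are reset, so let's set the - * status response code to 0 so the queue may be - * passed through (i.e., not filtered) - */ - if (status.response_code == AP_RESPONSE_DECONFIGURED) - q->reset_status.response_code = 0; if (q->saved_isc != VFIO_AP_ISC_INVALID) vfio_ap_free_aqic_resources(q); break; @@ -1728,12 +2003,7 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) queue_work(system_long_wq, &q->reset_work); break; case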
AP_RESPONSE_DECONFIGURED: - /* - * When an AP adapter is deconfigured, the associated - * queues are reset, so let's set the status response code to 0 - * so the queue may be passed through (i.e., not filtered). - */ - q->reset_status.response_code = 0; + case AP_RESPONSE_CHECKSTOPPED: vfio_ap_free_aqic_resources(q); break; default: @@ -1744,15 +2014,33 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) } } -static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable) +static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) { int ret = 0, loop_cursor; struct vfio_ap_queue *q; - hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) + hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) vfio_ap_mdev_reset_queue(q); - hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) { + flush_work(&q->reset_work); + + if (q->reset_status.response_code) + ret = -EIO; + } + + return ret; +} + +static int vfio_ap_mdev_reset_qlist(struct list_head *qlist) +{ + int ret = 0; + struct vfio_ap_queue *q; + + list_for_each_entry(q, qlist, reset_qnode) + vfio_ap_mdev_reset_queue(q); + + list_for_each_entry(q, qlist, reset_qnode) { flush_work(&q->reset_work); if (q->reset_status.response_code) @@ -1788,17 +2076,26 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); + get_update_locks_for_mdev(matrix_mdev); + + if (matrix_mdev->kvm) { + kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + signal_guest_ap_cfg_changed(matrix_mdev); + } + if (matrix_mdev->req_trigger) { if (!(count % 10)) dev_notice_ratelimited(dev, "Relaying device request to user (#%u)\n", count); - eventfd_signal(matrix_mdev->req_trigger, 1); + eventfd_signal(matrix_mdev->req_trigger); } else if (count == 0) { dev_notice(dev, "No device request registered, blocked until released by user\n"); } + + release_update_locks_for_mdev(matrix_mdev); } static int vfio_ap_mdev_get_device_info(unsigned long arg) @@ -1839,6 +2136,10 @@ static ssize_t vfio_ap_get_irq_info(unsigned long arg) info.count = 1; info.flags = VFIO_IRQ_INFO_EVENTFD; break; + case VFIO_AP_CFG_CHG_IRQ_INDEX: + info.count = 1; + info.flags = VFIO_IRQ_INFO_EVENTFD; + break; default: return -EINVAL; } @@ -1902,6 +2203,39 @@ static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, return 0; } +static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg) +{ + s32 fd; + void __user *data; + unsigned long minsz; + struct eventfd_ctx *cfg_chg_trigger; + + minsz = offsetofend(struct vfio_irq_set, count); + data = (void __user *)(arg + minsz); + + if (get_user(fd, (s32 __user *)data)) + return -EFAULT; + + if (fd == -1) { + if (matrix_mdev->cfg_chg_trigger) + eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); + matrix_mdev->cfg_chg_trigger = NULL; + } else if (fd >= 0) { + cfg_chg_trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(cfg_chg_trigger)) + return PTR_ERR(cfg_chg_trigger); + + if (matrix_mdev->cfg_chg_trigger) + eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); + + matrix_mdev->cfg_chg_trigger = cfg_chg_trigger; + } else { + return -EINVAL; + } + + return 0; +} + static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, unsigned long arg) { @@ -1917,6 +2251,8 @@ static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, switch (irq_set.index) { case VFIO_AP_REQ_IRQ_INDEX: return vfio_ap_set_request_irq(matrix_mdev, arg); + 
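
The VFIO_AP_CFG_CHG_IRQ_INDEX case dispatched just below pairs with a userspace consumer that registers an eventfd through VFIO_DEVICE_SET_IRQS and then reads an 8-byte counter whenever the driver signals a guest AP configuration change. A sketch of that consumer, assuming device_fd is an open VFIO device and that the index constant comes from this series' uapi update; error handling is kept minimal:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Register an eventfd for AP config-change notifications and wait once. */
static int wait_for_cfg_change(int device_fd)
{
	struct { struct vfio_irq_set set; int32_t fd; } req;
	uint64_t count;
	int efd;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.set.argsz = sizeof(req);
	req.set.flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	req.set.index = VFIO_AP_CFG_CHG_IRQ_INDEX;
	req.set.count = 1;
	req.fd = efd;	/* fd == -1 would unregister, per the handler below */
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &req) < 0)
		return -1;

	/* blocks until the driver calls eventfd_signal() */
	return read(efd, &count, sizeof(count)) == sizeof(count) ? 0 : -1;
}
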
case VFIO_AP_CFG_CHG_IRQ_INDEX: + return vfio_ap_set_cfg_change_irq(matrix_mdev, arg); default: return -EINVAL; } @@ -1938,11 +2274,11 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, ret = vfio_ap_mdev_get_device_info(arg); break; case VFIO_DEVICE_RESET: - ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); + ret = vfio_ap_mdev_reset_queues(matrix_mdev); break; case VFIO_DEVICE_GET_IRQ_INFO: - ret = vfio_ap_get_irq_info(arg); - break; + ret = vfio_ap_get_irq_info(arg); + break; case VFIO_DEVICE_SET_IRQS: ret = vfio_ap_set_irqs(matrix_mdev, arg); break; @@ -1976,6 +2312,7 @@ static ssize_t status_show(struct device *dev, { ssize_t nchars = 0; struct vfio_ap_queue *q; + unsigned long apid, apqi; struct ap_matrix_mdev *matrix_mdev; struct ap_device *apdev = to_ap_dev(dev); @@ -1983,16 +2320,26 @@ static ssize_t status_show(struct device *dev, q = dev_get_drvdata(&apdev->device); matrix_mdev = vfio_ap_mdev_for_queue(q); + /* If the queue is assigned to the matrix mediated device, then + * determine whether it is passed through to a guest; otherwise, + * indicate that it is unassigned. + */ if (matrix_mdev) { - if (matrix_mdev->kvm) - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_IN_USE); + apid = AP_QID_CARD(q->apqn); + apqi = AP_QID_QUEUE(q->apqn); + /* + * If the queue is passed through to the guest, then indicate + * that it is in use; otherwise, indicate that it is + * merely assigned to a matrix mediated device. + */ + if (matrix_mdev->kvm && + test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && + test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) + nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE); else - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_ASSIGNED); + nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED); } else { - nchars = scnprintf(buf, PAGE_SIZE, "%s\n", - AP_QUEUE_UNASSIGNED); + nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED); } mutex_unlock(&matrix_dev->mdevs_lock); @@ -2020,6 +2367,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { .bind_iommufd = vfio_iommufd_emulated_bind, .unbind_iommufd = vfio_iommufd_emulated_unbind, .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, .request = vfio_ap_mdev_request }; @@ -2046,10 +2394,10 @@ int vfio_ap_mdev_register(void) matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT; matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT; - matrix_dev->mdev_types[0] = &matrix_dev->mdev_type; + matrix_dev->mdev_types = &matrix_dev->mdev_type; ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device, &vfio_ap_matrix_driver, - matrix_dev->mdev_types, 1); + &matrix_dev->mdev_types, 1); if (ret) goto err_driver; return 0; @@ -2069,6 +2417,7 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev) { int ret; struct vfio_ap_queue *q; + DECLARE_BITMAP(apm_filtered, AP_DEVICES); struct ap_matrix_mdev *matrix_mdev; ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); @@ -2090,15 +2439,28 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev) if (matrix_mdev) { vfio_ap_mdev_link_queue(matrix_mdev, q); - if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, - matrix_mdev->matrix.aqm, - matrix_mdev)) + /* + * If we're in the process of handling the adding of adapters or + * domains to the host's AP configuration, then let the + * vfio_ap device driver's on_scan_complete callback filter the + * matrix and update the guest's AP configuration after all of + * the new queue devices are probed. 
+ */ + if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) || + !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS)) + goto done; + + if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apids(matrix_mdev, apm_filtered); + } } + +done: dev_set_drvdata(&apdev->device, q); release_update_locks_for_mdev(matrix_mdev); - return 0; + return ret; err_remove_group: sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); @@ -2115,26 +2477,40 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev) q = dev_get_drvdata(&apdev->device); get_update_locks_for_queue(q); matrix_mdev = q->matrix_mdev; + apid = AP_QID_CARD(q->apqn); + apqi = AP_QID_QUEUE(q->apqn); if (matrix_mdev) { - vfio_ap_unlink_queue_fr_mdev(q); - - apid = AP_QID_CARD(q->apqn); - apqi = AP_QID_QUEUE(q->apqn); - - /* - * If the queue is assigned to the guest's APCB, then remove - * the adapter's APID from the APCB and hot it into the guest. - */ + /* If the queue is assigned to the guest's AP configuration */ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { + /* + * Since the queues are defined via a matrix of adapters + * and domains, it is not possible to hot unplug a + * single queue; so, let's unplug the adapter. + */ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apid(matrix_mdev, apid); + goto done; } } - vfio_ap_mdev_reset_queue(q); - flush_work(&q->reset_work); + /* + * If the queue is not in the host's AP configuration, then resetting + * it will fail with response code 01, (APQN not valid); so, let's make + * sure it is in the host's config. + */ + if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) && + test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) { + vfio_ap_mdev_reset_queue(q); + flush_work(&q->reset_work); + } + +done: + if (matrix_mdev) + vfio_ap_unlink_queue_fr_mdev(q); + dev_set_drvdata(&apdev->device, NULL); kfree(q); release_update_locks_for_mdev(matrix_mdev); @@ -2160,7 +2536,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm) mutex_lock(&matrix_dev->guests_lock); mutex_lock(&matrix_dev->mdevs_lock); - ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); + ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm); mutex_unlock(&matrix_dev->mdevs_lock); mutex_unlock(&matrix_dev->guests_lock); @@ -2296,7 +2672,7 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm) bool apid_cleared; struct ap_queue_status status; unsigned long apid, apqi; - struct ap_tapq_gr2 info; + struct ap_tapq_hwinfo info; for_each_set_bit_inv(apid, apm, AP_DEVICES) { apid_cleared = false; @@ -2442,39 +2818,30 @@ void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info, static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) { - bool do_hotplug = false; - int filter_domains = 0; - int filter_adapters = 0; - DECLARE_BITMAP(apm, AP_DEVICES); - DECLARE_BITMAP(aqm, AP_DOMAINS); + DECLARE_BITMAP(apm_filtered, AP_DEVICES); + bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false; mutex_lock(&matrix_mdev->kvm->lock); mutex_lock(&matrix_dev->mdevs_lock); - filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm, - matrix_mdev->apm_add, AP_DEVICES); - filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm, - matrix_mdev->aqm_add, AP_DOMAINS); - - if (filter_adapters && filter_domains) - do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, 
matrix_mdev); - else if (filter_adapters) - do_hotplug |= - vfio_ap_mdev_filter_matrix(apm, - matrix_mdev->shadow_apcb.aqm, - matrix_mdev); - else - do_hotplug |= - vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm, - aqm, matrix_mdev); + filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, + matrix_mdev->apm_add, AP_DEVICES); + filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, + matrix_mdev->aqm_add, AP_DOMAINS); + filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, + matrix_mdev->adm_add, AP_DOMAINS); - if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add, - AP_DOMAINS)) + if (filter_adapters || filter_domains) + do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); + + if (filter_cdoms) do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); if (do_hotplug) vfio_ap_mdev_update_guest_apcb(matrix_mdev); + reset_queues_for_apids(matrix_mdev, apm_filtered); + mutex_unlock(&matrix_dev->mdevs_lock); mutex_unlock(&matrix_mdev->kvm->lock); } diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 88aff8b81f2f..9bff666b0b35 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -53,7 +53,7 @@ struct ap_matrix_dev { struct mutex guests_lock; /* serializes access to each KVM guest */ struct mdev_parent parent; struct mdev_type mdev_type; - struct mdev_type *mdev_types[1]; + struct mdev_type *mdev_types; }; extern struct ap_matrix_dev *matrix_dev; @@ -75,11 +75,11 @@ extern struct ap_matrix_dev *matrix_dev; */ struct ap_matrix { unsigned long apm_max; - DECLARE_BITMAP(apm, 256); + DECLARE_BITMAP(apm, AP_DEVICES); unsigned long aqm_max; - DECLARE_BITMAP(aqm, 256); + DECLARE_BITMAP(aqm, AP_DOMAINS); unsigned long adm_max; - DECLARE_BITMAP(adm, 256); + DECLARE_BITMAP(adm, AP_DOMAINS); }; /** @@ -105,6 +105,7 @@ struct ap_queue_table { * @mdev: the mediated device * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev * @req_trigger eventfd ctx for signaling userspace to return a device + * @cfg_chg_trigger eventfd ctx to signal AP config changed to userspace * @apm_add: bitmap of APIDs added to the host's AP configuration * @aqm_add: bitmap of APQIs added to the host's AP configuration * @adm_add: bitmap of control domain numbers added to the host's AP @@ -120,6 +121,7 @@ struct ap_matrix_mdev { struct mdev_device *mdev; struct ap_queue_table qtable; struct eventfd_ctx *req_trigger; + struct eventfd_ctx *cfg_chg_trigger; DECLARE_BITMAP(apm_add, AP_DEVICES); DECLARE_BITMAP(aqm_add, AP_DOMAINS); DECLARE_BITMAP(adm_add, AP_DOMAINS); @@ -133,6 +135,8 @@ struct ap_matrix_mdev { * @apqn: the APQN of the AP queue device * @saved_isc: the guest ISC registered with the GIB interface * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable + * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues + * that need to be reset * @reset_status: the status from the last reset of the queue * @reset_work: work to wait for queue reset to complete */ @@ -143,6 +147,7 @@ struct vfio_ap_queue { #define VFIO_AP_ISC_INVALID 0xff unsigned char saved_isc; struct hlist_node mdev_qnode; + struct list_head reset_qnode; struct ap_queue_status reset_status; struct work_struct reset_work; }; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4b23c9f7f3e5..7a3b99f065f2 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -12,12 +12,14 @@ * Multiple device nodes: Harald 
Freudenberger <freude@linux.ibm.com> */ +#define pr_fmt(fmt) "zcrypt: " fmt + +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/fs.h> -#include <linux/compat.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/uaccess.h> @@ -47,6 +49,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); +unsigned int zcrypt_mempool_threshold = 5; +module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440); +MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)"); + /* * zcrypt tracepoint functions */ @@ -57,10 +63,6 @@ DEFINE_SPINLOCK(zcrypt_list_lock); LIST_HEAD(zcrypt_card_list); static atomic_t zcrypt_open_count = ATOMIC_INIT(0); -static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); - -atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); -EXPORT_SYMBOL(zcrypt_rescan_req); static LIST_HEAD(zcrypt_ops_list); @@ -69,20 +71,15 @@ debug_info_t *zcrypt_dbf_info; /* * Process a rescan of the transport layer. - * - * Returns 1, if the rescan has been processed, otherwise 0. + * Runs a synchronous AP bus rescan. + * Returns true if something has changed (for example the + * bus scan has found and built up new devices) and it is + * worth retrying. Otherwise false is returned, meaning + * no changes at the AP bus level. */ -static inline int zcrypt_process_rescan(void) +static inline bool zcrypt_process_rescan(void) { - if (atomic_read(&zcrypt_rescan_req)) { - atomic_set(&zcrypt_rescan_req, 0); - atomic_inc(&zcrypt_rescan_count); - ap_bus_force_rescan(); - ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__, - atomic_inc_return(&zcrypt_rescan_count)); - return 1; - } - return 0; + return ap_bus_force_rescan(); }
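
zcrypt_process_rescan() now backs a retry pattern repeated by every entry point in this file: retry -EAGAIN up to TRACK_AGAIN_MAX times, and if -ENODEV is seen, run at most one more bounded round after a rescan that actually changed the bus. A compact userspace model of that control flow; do_send() and rescan_bus() are illustrative stubs, and the TRACK_AGAIN_MAX value is assumed to match the zcrypt bound:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define TRACK_AGAIN_MAX 10	/* assumed to match the zcrypt bound */

static int do_send(void)	/* stand-in for a worker like _zcrypt_send_cprb() */
{
	return -ENODEV;
}

static bool rescan_bus(void)	/* stand-in for ap_bus_force_rescan() */
{
	return false;
}

static int send_with_retry(void)
{
	int again = 0, rc;

	do {
		rc = do_send();
	} while (rc == -EAGAIN && ++again < TRACK_AGAIN_MAX);

	/*
	 * On ENODEV run one more bounded round, but only if the rescan
	 * reported a change; the retry counter deliberately carries over,
	 * mirroring how the zcrypt_track state is reused across both loops.
	 */
	if (rc == -ENODEV && rescan_bus())
		do {
			rc = do_send();
		} while (rc == -EAGAIN && ++again < TRACK_AGAIN_MAX);

	return (rc == -EAGAIN && again >= TRACK_AGAIN_MAX) ? -EIO : rc;
}

int main(void)
{
	printf("rc=%d\n", send_with_retry());
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops) @@ -113,7 +110,11 @@ EXPORT_SYMBOL(zcrypt_msgtype); struct zcdn_device; -static struct class *zcrypt_class; +static void zcdn_device_release(struct device *dev); +static const struct class zcrypt_class = { + .name = ZCRYPT_NAME, + .dev_release = zcdn_device_release, +}; static dev_t zcrypt_devt; static struct cdev zcrypt_cdev; @@ -136,7 +137,7 @@ static int zcdn_destroy(const char *name); */ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) { - struct device *dev = class_find_device_by_name(zcrypt_class, name); + struct device *dev = class_find_device_by_name(&zcrypt_class, name); return dev ? to_zcdn_dev(dev) : NULL; } @@ -148,7 +149,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) */ static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) { - struct device *dev = class_find_device_by_devt(zcrypt_class, devt); + struct device *dev = class_find_device_by_devt(&zcrypt_class, devt); return dev ?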
to_zcdn_dev(dev) : NULL; } @@ -160,7 +161,7 @@ static ssize_t ioctlmask_show(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; n = sysfs_emit(buf, "0x"); @@ -168,7 +169,7 @@ static ssize_t ioctlmask_show(struct device *dev, n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]); n += sysfs_emit_at(buf, n, "\n"); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return n; } @@ -181,7 +182,7 @@ static ssize_t ioctlmask_store(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, - AP_IOCTLS, &ap_perms_mutex); + AP_IOCTLS, &ap_attr_mutex); if (rc) return rc; @@ -197,7 +198,7 @@ static ssize_t apmask_show(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; n = sysfs_emit(buf, "0x"); @@ -205,7 +206,7 @@ static ssize_t apmask_show(struct device *dev, n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]); n += sysfs_emit_at(buf, n, "\n"); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return n; } @@ -218,7 +219,7 @@ static ssize_t apmask_store(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); rc = ap_parse_mask_str(buf, zcdndev->perms.apm, - AP_DEVICES, &ap_perms_mutex); + AP_DEVICES, &ap_attr_mutex); if (rc) return rc; @@ -234,7 +235,7 @@ static ssize_t aqmask_show(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; n = sysfs_emit(buf, "0x"); @@ -242,7 +243,7 @@ static ssize_t aqmask_show(struct device *dev, n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]); n += sysfs_emit_at(buf, n, "\n"); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return n; } @@ -255,7 +256,7 @@ static ssize_t aqmask_store(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, - AP_DOMAINS, &ap_perms_mutex); + AP_DOMAINS, &ap_attr_mutex); if (rc) return rc; @@ -271,7 +272,7 @@ static ssize_t admask_show(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; n = sysfs_emit(buf, "0x"); @@ -279,7 +280,7 @@ static ssize_t admask_show(struct device *dev, n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]); n += sysfs_emit_at(buf, n, "\n"); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return n; } @@ -292,7 +293,7 @@ static ssize_t admask_store(struct device *dev, struct zcdn_device *zcdndev = to_zcdn_dev(dev); rc = ap_parse_mask_str(buf, zcdndev->perms.adm, - AP_DOMAINS, &ap_perms_mutex); + AP_DOMAINS, &ap_attr_mutex); if (rc) return rc; @@ -366,10 +367,9 @@ static int zcdn_create(const char *name) { dev_t devt; int i, rc = 0; - char nodename[ZCDN_MAX_NAME]; struct zcdn_device *zcdndev; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; /* check if device node with this name already exists */ @@ -403,17 +403,15 @@ static int zcdn_create(const char *name) goto unlockout; } zcdndev->device.release = zcdn_device_release; - zcdndev->device.class = zcrypt_class; 
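
The replacement line that follows takes the address of the file-scope zcrypt_class instead of dereferencing a heap-allocated pointer, matching the driver-core move to const class objects. A minimal, hypothetical module skeleton of the same pattern, assuming the post-6.4 API in which class_register() accepts a const struct class pointer; all names are illustrative:

#include <linux/device/class.h>
#include <linux/module.h>

/* the class object now lives in .rodata instead of being allocated */
static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	return class_register(&demo_class);	/* was: class_create("demo") */
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("static struct class registration sketch");
MODULE_LICENSE("GPL");
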
+ zcdndev->device.class = &zcrypt_class; zcdndev->device.devt = devt; zcdndev->device.groups = zcdn_dev_attr_groups; if (name[0]) - strncpy(nodename, name, sizeof(nodename)); + rc = dev_set_name(&zcdndev->device, "%s", name); else - snprintf(nodename, sizeof(nodename), - ZCRYPT_NAME "_%d", (int)MINOR(devt)); - nodename[sizeof(nodename) - 1] = '\0'; - if (dev_set_name(&zcdndev->device, nodename)) { - rc = -EINVAL; + rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt)); + if (rc) { + kfree(zcdndev); goto unlockout; } rc = device_register(&zcdndev->device); @@ -426,7 +424,7 @@ static int zcdn_create(const char *name) __func__, MAJOR(devt), MINOR(devt)); unlockout: - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return rc; } @@ -435,7 +433,7 @@ static int zcdn_destroy(const char *name) int rc = 0; struct zcdn_device *zcdndev; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; /* try to find this zcdn device */ @@ -453,7 +451,7 @@ static int zcdn_destroy(const char *name) device_unregister(&zcdndev->device); unlockout: - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); return rc; } @@ -463,7 +461,7 @@ static void zcdn_destroy_all(void) dev_t devt; struct zcdn_device *zcdndev; - mutex_lock(&ap_perms_mutex); + mutex_lock(&ap_attr_mutex); for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); zcdndev = find_zcdndev_by_devt(devt); @@ -472,7 +470,7 @@ static void zcdn_destroy_all(void) device_unregister(&zcdndev->device); } } - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); } /* @@ -509,11 +507,11 @@ static int zcrypt_open(struct inode *inode, struct file *filp) if (filp->f_inode->i_cdev == &zcrypt_cdev) { struct zcdn_device *zcdndev; - if (mutex_lock_interruptible(&ap_perms_mutex)) + if (mutex_lock_interruptible(&ap_attr_mutex)) return -ERESTARTSYS; zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); /* find returns a reference, no get_device() needed */ - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); if (zcdndev) perms = &zcdndev->perms; } @@ -533,9 +531,9 @@ static int zcrypt_release(struct inode *inode, struct file *filp) if (filp->f_inode->i_cdev == &zcrypt_cdev) { struct zcdn_device *zcdndev; - mutex_lock(&ap_perms_mutex); + mutex_lock(&ap_attr_mutex); zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); - mutex_unlock(&ap_perms_mutex); + mutex_unlock(&ap_attr_mutex); if (zcdndev) { /* 2 puts here: one for find, one for open */ put_device(&zcdndev->device); @@ -582,6 +580,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, { if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) return NULL; + zcrypt_card_get(zc); zcrypt_queue_get(zq); get_device(&zq->queue->ap_dev.device); atomic_add(weight, &zc->load); @@ -601,6 +600,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc, atomic_sub(weight, &zq->load); put_device(&zq->queue->ap_dev.device); zcrypt_queue_put(zq); + zcrypt_card_put(zc); module_put(mod); } @@ -645,16 +645,17 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + 
goto out; if (mex->outputdatalength < mex->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -676,7 +677,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, for_each_zcrypt_card(zc) { /* Check for usable accelerator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || - !(zc->card->functions & 0x18000000)) + !(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) continue; /* Check for size limits */ if (zc->min_mod_size > mex->inputdatalength || @@ -696,7 +697,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo || - !zq->queue->config || zq->queue->chkstop) + !ap_queue_usable(zq->queue)) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms, @@ -718,8 +719,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -732,13 +732,14 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; } trace_s390_zcrypt_rep(mex, func_code, rc, - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + ap_msg.psmid); return rc; } @@ -750,16 +751,17 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (crt->outputdatalength < crt->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -781,7 +783,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, for_each_zcrypt_card(zc) { /* Check for usable accelerator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || - !(zc->card->functions & 0x18000000)) + !(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) continue; /* Check for size limits */ if (zc->min_mod_size > crt->inputdatalength || @@ -801,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo_crt || - !zq->queue->config || zq->queue->chkstop) + !ap_queue_usable(zq->queue)) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms, @@ -823,8 +825,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -837,37 +838,45 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; } trace_s390_zcrypt_rep(crt, func_code, rc, - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + ap_msg.psmid); return rc; } -static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_cprb(u32 xflags, 
struct ap_perms *perms, struct zcrypt_track *tr, struct ica_xcRB *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; unsigned short *domain, tdom; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); xcrb->status = 0; - ap_init_message(&ap_msg); + + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? + AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) goto out; + print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); tdom = *domain; if (perms != &ap_perms && tdom < AP_DOMAINS) { @@ -896,7 +905,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, for_each_zcrypt_card(zc) { /* Check for usable CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || - !(zc->card->functions & 0x10000000)) + !zc->card->hwinfo.cca) continue; /* Check for user selected CCA card */ if (xcrb->user_defined != AUTOSELECT && @@ -919,7 +928,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, for_each_zcrypt_queue(zq, zc) { /* check for device usable and eligible */ if (!zq->online || !zq->ops->send_cprb || - !zq->queue->config || zq->queue->chkstop || + !ap_queue_usable(zq->queue) || (tdom != AUTOSEL_DOM && tdom != AP_QID_QUEUE(zq->queue->qid))) continue; @@ -943,8 +952,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, xcrb->user_defined, *domain); + pr_debug("no match for address %02x.%04x => ENODEV\n", + xcrb->user_defined, *domain); rc = -ENODEV; goto out; } @@ -955,25 +964,49 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, *domain = AP_QID_QUEUE(qid); rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); + if (!rc) { + print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); + } spin_lock(&zcrypt_list_lock); zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; } trace_s390_zcrypt_rep(xcrb, func_code, rc, - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + ap_msg.psmid); return rc; } -long zcrypt_send_cprb(struct ica_xcRB *xcrb) +long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags) { - return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); + struct zcrypt_track tr; + int rc; + + memset(&tr, 0, sizeof(tr)); + + do { + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) + do { + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; + if (rc) + pr_debug("rc=%d\n", rc); + + return rc; } EXPORT_SYMBOL(zcrypt_send_cprb); @@ -1004,60 +1037,62 @@ static bool is_desired_ep11_queue(unsigned int dev_qid, return false; } -static long 
_zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ep11_urb *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - struct ep11_target_dev *targets; + struct ep11_target_dev *targets = NULL; unsigned short target_num; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code, domain; + unsigned int func_code = 0, domain; struct ap_message ap_msg; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? + AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; target_num = (unsigned short)xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ - targets = NULL; + rc = -ENOMEM; if (target_num != 0) { - struct ep11_target_dev __user *uptr; - - targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); - if (!targets) { - func_code = 0; - rc = -ENOMEM; - goto out; - } - - uptr = (struct ep11_target_dev __force __user *)xcrb->targets; - if (z_copy_from_user(userspace, targets, uptr, - target_num * sizeof(*targets))) { - func_code = 0; - rc = -EFAULT; - goto out_free; + if (userspace) { + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) + goto out; + if (copy_from_user(targets, xcrb->targets, + target_num * sizeof(*targets))) { + rc = -EFAULT; + goto out; + } + } else { + targets = (struct ep11_target_dev __force __kernel *)xcrb->targets; } } rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) - goto out_free; + goto out; + print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); if (perms != &ap_perms && domain < AUTOSEL_DOM) { if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { if (!test_bit_inv(domain, perms->adm)) { rc = -ENODEV; - goto out_free; + goto out; } } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { rc = -EOPNOTSUPP; - goto out_free; + goto out; } } @@ -1067,7 +1102,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, for_each_zcrypt_card(zc) { /* Check for usable EP11 card */ if (!zc->online || !zc->card->config || zc->card->chkstop || - !(zc->card->functions & 0x04000000)) + !zc->card->hwinfo.ep11) continue; /* Check for user selected EP11 card */ if (targets && @@ -1090,7 +1125,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->send_ep11_cprb || - !zq->queue->config || zq->queue->chkstop || + !ap_queue_usable(zq->queue) || (targets && !is_desired_ep11_queue(zq->queue->qid, target_num, targets))) @@ -1116,43 +1151,65 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (!pref_zq) { if (targets && target_num == 1) { - ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, (int)targets->ap_id, - (int)targets->dom_id); + pr_debug("no match for address %02x.%04x => ENODEV\n", + (int)targets->ap_id, (int)targets->dom_id); } else if (targets) { - ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", - __func__, (int)target_num); + pr_debug("no match for %d target addrs => ENODEV\n", + (int)target_num); } else { - ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", - __func__); + pr_debug("no 
match for address ff.ffff => ENODEV\n"); } rc = -ENODEV; - goto out_free; + goto out; } qid = pref_zq->queue->qid; rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg); + if (!rc) { + print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); + } spin_lock(&zcrypt_list_lock); zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); -out_free: - kfree(targets); out: - ap_release_message(&ap_msg); + if (userspace) + kfree(targets); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; } trace_s390_zcrypt_rep(xcrb, func_code, rc, - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + ap_msg.psmid); return rc; } -long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags) { - return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb); + struct zcrypt_track tr; + int rc; + + memset(&tr, 0, sizeof(tr)); + + do { + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) + do { + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; + if (rc) + pr_debug("rc=%d\n", rc); + + return rc; } EXPORT_SYMBOL(zcrypt_send_ep11_cprb); @@ -1161,7 +1218,7 @@ static long zcrypt_rng(char *buffer) struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; struct ap_message ap_msg; unsigned int domain; int qid = 0, rc = -ENODEV; @@ -1169,7 +1226,9 @@ static long zcrypt_rng(char *buffer) trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); if (rc) goto out; @@ -1180,7 +1239,7 @@ static long zcrypt_rng(char *buffer) for_each_zcrypt_card(zc) { /* Check for usable CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop || - !(zc->card->functions & 0x10000000)) + !zc->card->hwinfo.cca) continue; /* get weight index of the card device */ wgt = zc->speed_rating[func_code]; @@ -1189,7 +1248,7 @@ static long zcrypt_rng(char *buffer) for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rng || - !zq->queue->config || zq->queue->chkstop) + !ap_queue_usable(zq->queue)) continue; if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt)) continue; @@ -1202,8 +1261,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -1216,9 +1274,10 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); trace_s390_zcrypt_rep(buffer, func_code, rc, - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + ap_msg.psmid); return rc; } @@ -1241,7 +1300,7 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) queue = AP_QID_QUEUE(zq->queue->qid); stat = &devstatus[card * AP_DOMAINS + queue]; stat->hwtype = zc->card->ap_dev.device_type; - 
stat->functions = zc->card->functions >> 26; + stat->functions = zc->card->hwinfo.fac >> 26; stat->qid = zq->queue->qid; stat->online = zq->online ? 0x01 : 0x00; } @@ -1249,24 +1308,27 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) spin_unlock(&zcrypt_list_lock); } -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue) { struct zcrypt_card *zc; struct zcrypt_queue *zq; struct zcrypt_device_status_ext *stat; int card, queue; - memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT - * sizeof(struct zcrypt_device_status_ext)); + maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT); + maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT); spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); queue = AP_QID_QUEUE(zq->queue->qid); - stat = &devstatus[card * AP_DOMAINS + queue]; + if (card >= maxcard || queue >= maxqueue) + continue; + stat = &devstatus[card * maxqueue + queue]; stat->hwtype = zc->card->ap_dev.device_type; - stat->functions = zc->card->functions >> 26; + stat->functions = zc->card->hwinfo.fac >> 26; stat->qid = zq->queue->qid; stat->online = zq->online ? 0x01 : 0x00; } @@ -1289,7 +1351,7 @@ int zcrypt_device_status_ext(int card, int queue, if (card == AP_QID_CARD(zq->queue->qid) && queue == AP_QID_QUEUE(zq->queue->qid)) { devstat->hwtype = zc->card->ap_dev.device_type; - devstat->functions = zc->card->functions >> 26; + devstat->functions = zc->card->hwinfo.fac >> 26; devstat->qid = zq->queue->qid; devstat->online = zq->online ? 0x01 : 0x00; spin_unlock(&zcrypt_list_lock); @@ -1434,20 +1496,17 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) { - ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc); + pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc); return rc; } return put_user(mex.outputdatalength, &umex->outputdatalength); @@ -1466,20 +1525,17 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = zcrypt_rsa_crt(perms, &tr, &crt); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_crt(perms, &tr, &crt); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && 
tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) { - ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc); + pr_debug("ioctl ICARSACRT rc=%d\n", rc); return rc; } return put_user(crt.outputdatalength, &ucrt->outputdatalength); @@ -1490,6 +1546,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ica_xcRB xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ica_xcRB __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1497,22 +1554,19 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", - rc, xcrb.status); + pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n", + rc, xcrb.status); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; @@ -1523,6 +1577,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ep11_urb xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ep11_urb __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1530,21 +1585,18 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc); + pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; @@ -1575,12 +1627,14 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, size_t total_size = MAX_ZDEV_ENTRIES_EXT * sizeof(struct zcrypt_device_status_ext); - device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); + device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + 
GFP_KERNEL); if (!device_status) return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + zcrypt_device_status_mask_ext(device_status, + MAX_ZDEV_CARDIDS_EXT, + MAX_ZDEV_DOMAINS_EXT); if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; @@ -1673,210 +1727,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, } /* unknown ioctl number */ default: - ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd); + pr_debug("unknown ioctl 0x%08x\n", cmd); return -ENOIOCTLCMD; } } -#ifdef CONFIG_COMPAT -/* - * ioctl32 conversion routines - */ -struct compat_ica_rsa_modexpo { - compat_uptr_t inputdata; - unsigned int inputdatalength; - compat_uptr_t outputdata; - unsigned int outputdatalength; - compat_uptr_t b_key; - compat_uptr_t n_modulus; -}; - -static long trans_modexpo32(struct ap_perms *perms, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); - struct compat_ica_rsa_modexpo mex32; - struct ica_rsa_modexpo mex64; - struct zcrypt_track tr; - long rc; - - memset(&tr, 0, sizeof(tr)); - if (copy_from_user(&mex32, umex32, sizeof(mex32))) - return -EFAULT; - mex64.inputdata = compat_ptr(mex32.inputdata); - mex64.inputdatalength = mex32.inputdatalength; - mex64.outputdata = compat_ptr(mex32.outputdata); - mex64.outputdatalength = mex32.outputdatalength; - mex64.b_key = compat_ptr(mex32.b_key); - mex64.n_modulus = compat_ptr(mex32.n_modulus); - do { - rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) - do { - rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) - rc = -EIO; - if (rc) - return rc; - return put_user(mex64.outputdatalength, - &umex32->outputdatalength); -} - -struct compat_ica_rsa_modexpo_crt { - compat_uptr_t inputdata; - unsigned int inputdatalength; - compat_uptr_t outputdata; - unsigned int outputdatalength; - compat_uptr_t bp_key; - compat_uptr_t bq_key; - compat_uptr_t np_prime; - compat_uptr_t nq_prime; - compat_uptr_t u_mult_inv; -}; - -static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); - struct compat_ica_rsa_modexpo_crt crt32; - struct ica_rsa_modexpo_crt crt64; - struct zcrypt_track tr; - long rc; - - memset(&tr, 0, sizeof(tr)); - if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) - return -EFAULT; - crt64.inputdata = compat_ptr(crt32.inputdata); - crt64.inputdatalength = crt32.inputdatalength; - crt64.outputdata = compat_ptr(crt32.outputdata); - crt64.outputdatalength = crt32.outputdatalength; - crt64.bp_key = compat_ptr(crt32.bp_key); - crt64.bq_key = compat_ptr(crt32.bq_key); - crt64.np_prime = compat_ptr(crt32.np_prime); - crt64.nq_prime = compat_ptr(crt32.nq_prime); - crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); - do { - rc = zcrypt_rsa_crt(perms, &tr, &crt64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) - do { - rc = zcrypt_rsa_crt(perms, &tr, &crt64); - if (rc == 
-EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) - rc = -EIO; - if (rc) - return rc; - return put_user(crt64.outputdatalength, - &ucrt32->outputdatalength); -} - -struct compat_ica_xcrb { - unsigned short agent_ID; - unsigned int user_defined; - unsigned short request_ID; - unsigned int request_control_blk_length; - unsigned char padding1[16 - sizeof(compat_uptr_t)]; - compat_uptr_t request_control_blk_addr; - unsigned int request_data_length; - char padding2[16 - sizeof(compat_uptr_t)]; - compat_uptr_t request_data_address; - unsigned int reply_control_blk_length; - char padding3[16 - sizeof(compat_uptr_t)]; - compat_uptr_t reply_control_blk_addr; - unsigned int reply_data_length; - char padding4[16 - sizeof(compat_uptr_t)]; - compat_uptr_t reply_data_addr; - unsigned short priority_window; - unsigned int status; -} __packed; - -static long trans_xcrb32(struct ap_perms *perms, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); - struct compat_ica_xcrb xcrb32; - struct zcrypt_track tr; - struct ica_xcRB xcrb64; - long rc; - - memset(&tr, 0, sizeof(tr)); - if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) - return -EFAULT; - xcrb64.agent_ID = xcrb32.agent_ID; - xcrb64.user_defined = xcrb32.user_defined; - xcrb64.request_ID = xcrb32.request_ID; - xcrb64.request_control_blk_length = - xcrb32.request_control_blk_length; - xcrb64.request_control_blk_addr = - compat_ptr(xcrb32.request_control_blk_addr); - xcrb64.request_data_length = - xcrb32.request_data_length; - xcrb64.request_data_address = - compat_ptr(xcrb32.request_data_address); - xcrb64.reply_control_blk_length = - xcrb32.reply_control_blk_length; - xcrb64.reply_control_blk_addr = - compat_ptr(xcrb32.reply_control_blk_addr); - xcrb64.reply_data_length = xcrb32.reply_data_length; - xcrb64.reply_data_addr = - compat_ptr(xcrb32.reply_data_addr); - xcrb64.priority_window = xcrb32.priority_window; - xcrb64.status = xcrb32.status; - do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) - do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) - rc = -EIO; - xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; - xcrb32.reply_data_length = xcrb64.reply_data_length; - xcrb32.status = xcrb64.status; - if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) - return -EFAULT; - return rc; -} - -static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg) -{ - int rc; - struct ap_perms *perms = - (struct ap_perms *)filp->private_data; - - rc = zcrypt_check_ioctl(perms, cmd); - if (rc) - return rc; - - if (cmd == ICARSAMODEXPO) - return trans_modexpo32(perms, filp, cmd, arg); - if (cmd == ICARSACRT) - return trans_modexpo_crt32(perms, filp, cmd, arg); - if (cmd == ZSECSENDCPRB) - return trans_xcrb32(perms, filp, cmd, arg); - return zcrypt_unlocked_ioctl(filp, cmd, arg); -} -#endif - /* * Misc device file operations. 
*/ @@ -1885,12 +1740,8 @@ static const struct file_operations zcrypt_fops = { .read = zcrypt_read, .write = zcrypt_write, .unlocked_ioctl = zcrypt_unlocked_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = zcrypt_compat_ioctl, -#endif .open = zcrypt_open, .release = zcrypt_release, - .llseek = no_llseek, }; /* @@ -1917,8 +1768,8 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) */ if (zcrypt_rng_buffer_index == 0) { rc = zcrypt_rng((char *)zcrypt_rng_buffer); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + /* on ENODEV failure: retry once again after an AP bus rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) rc = zcrypt_rng((char *)zcrypt_rng_buffer); if (rc < 0) return -EIO; @@ -1980,7 +1831,7 @@ void zcrypt_rng_device_remove(void) * an asynchronous job. This function waits until these initial jobs * are done and so the zcrypt api should be ready to serve crypto * requests - if there are resources available. The function uses an - * internal timeout of 60s. The very first caller will either wait for + * internal timeout of 30s. The very first caller will either wait for * ap bus bindings complete or the timeout happens. This state will be * remembered for further callers which will only be blocked until a * decision is made (timeout or bindings complete). @@ -1999,8 +1850,8 @@ int zcrypt_wait_api_operational(void) switch (zcrypt_wait_api_state) { case 0: /* initial state, invoke wait for the ap bus complete */ - rc = ap_wait_init_apqn_bindings_complete( - msecs_to_jiffies(60 * 1000)); + rc = ap_wait_apqn_bindings_complete( + msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS)); switch (rc) { case 0: /* ap bus bindings are complete */ @@ -2017,8 +1868,7 @@ int zcrypt_wait_api_operational(void) break; default: /* other failure */ - ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n", - __func__, rc); + pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc); break; } break; @@ -2041,7 +1891,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational); int __init zcrypt_debug_init(void) { zcrypt_dbf_info = debug_register("zcrypt", 2, 1, - DBF_MAX_SPRINTF_ARGS * sizeof(long)); + ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long)); debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); debug_set_level(zcrypt_dbf_info, DBF_ERR); @@ -2058,12 +1908,9 @@ static int __init zcdn_init(void) int rc; /* create a new class 'zcrypt' */ - zcrypt_class = class_create(ZCRYPT_NAME); - if (IS_ERR(zcrypt_class)) { - rc = PTR_ERR(zcrypt_class); - goto out_class_create_failed; - } - zcrypt_class->dev_release = zcdn_device_release; + rc = class_register(&zcrypt_class); + if (rc) + goto out_class_register_failed; /* alloc device minor range */ rc = alloc_chrdev_region(&zcrypt_devt, @@ -2079,35 +1926,35 @@ static int __init zcdn_init(void) goto out_cdev_add_failed; /* need some class specific sysfs attributes */ - rc = class_create_file(zcrypt_class, &class_attr_zcdn_create); + rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create); if (rc) goto out_class_create_file_1_failed; - rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy); + rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy); if (rc) goto out_class_create_file_2_failed; return 0; out_class_create_file_2_failed: - class_remove_file(zcrypt_class, &class_attr_zcdn_create); + class_remove_file(&zcrypt_class, &class_attr_zcdn_create); out_class_create_file_1_failed: cdev_del(&zcrypt_cdev); out_cdev_add_failed: unregister_chrdev_region(zcrypt_devt, 
ZCRYPT_MAX_MINOR_NODES); out_alloc_chrdev_failed: - class_destroy(zcrypt_class); -out_class_create_failed: + class_unregister(&zcrypt_class); +out_class_register_failed: return rc; } static void zcdn_exit(void) { - class_remove_file(zcrypt_class, &class_attr_zcdn_create); - class_remove_file(zcrypt_class, &class_attr_zcdn_destroy); + class_remove_file(&zcrypt_class, &class_attr_zcdn_create); + class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy); zcdn_destroy_all(); cdev_del(&zcrypt_cdev); unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); - class_destroy(zcrypt_class); + class_unregister(&zcrypt_class); } /* @@ -2119,13 +1966,27 @@ int __init zcrypt_api_init(void) { int rc; + /* make sure the mempool threshold is >= 1 */ + if (zcrypt_mempool_threshold < 1) { + rc = -EINVAL; + goto out; + } + rc = zcrypt_debug_init(); if (rc) goto out; rc = zcdn_init(); if (rc) - goto out; + goto out_zcdn_init_failed; + + rc = zcrypt_ccamisc_init(); + if (rc) + goto out_ccamisc_init_failed; + + rc = zcrypt_ep11misc_init(); + if (rc) + goto out_ep11misc_init_failed; /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); @@ -2138,7 +1999,12 @@ int __init zcrypt_api_init(void) return 0; out_misc_register_failed: + zcrypt_ep11misc_exit(); +out_ep11misc_init_failed: + zcrypt_ccamisc_exit(); +out_ccamisc_init_failed: zcdn_exit(); +out_zcdn_init_failed: zcrypt_debug_exit(); out: return rc; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index de659954c8f7..6ef8850a42df 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -38,6 +38,15 @@ */ #define ZCRYPT_RNG_BUFFER_SIZE 4096 +/** + * The zcrypt_wait_api_operational() function waits this + * amount in milliseconds for ap_wait_aqpn_bindings_complete(). + * Also on a cprb send failure with ENODEV the send functions + * trigger an ap bus rescan and wait this time in milliseconds + * for ap_wait_aqpn_bindings_complete() before resending. + */ +#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000 + /* * Identifier for Crypto Request Performance Index */ @@ -67,6 +76,13 @@ struct zcrypt_track { #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 +/* + * xflags - to be used with zcrypt_send_cprb() and + * zcrypt_send_ep11_cprb() for the xflags parameter. 
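
The reworked zcrypt_api_init() above extends a textbook unwind ladder: each newly added init step (ccamisc, ep11misc) gets a matching goto label so a failure tears down exactly the steps that already succeeded, in reverse order. A generic sketch of the pattern, with hypothetical step names:

    static int __init demo_init(void)
    {
        int rc;

        rc = step_a_init();
        if (rc)
            goto out;
        rc = step_b_init();
        if (rc)
            goto out_a;        /* undo A only */
        rc = step_c_init();
        if (rc)
            goto out_b;        /* undo B, then A */
        return 0;

    out_b:
        step_b_exit();
    out_a:
        step_a_exit();
    out:
        return rc;
    }
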
+ */ +#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */ +#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */ + struct zcrypt_ops { long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, struct ap_message *); @@ -123,6 +139,8 @@ extern atomic_t zcrypt_rescan_req; extern spinlock_t zcrypt_list_lock; extern struct list_head zcrypt_card_list; +extern unsigned int zcrypt_mempool_threshold; + #define for_each_zcrypt_card(_zc) \ list_for_each_entry(_zc, &zcrypt_card_list, list) @@ -152,9 +170,10 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); -long zcrypt_send_cprb(struct ica_xcRB *xcRB); -long zcrypt_send_ep11_cprb(struct ep11_urb *urb); -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); +long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags); +long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue); int zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstatus); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index c815722d0ac8..6dea702a5cac 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -11,6 +11,7 @@ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> */ +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -18,7 +19,6 @@ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/compat.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/uaccess.h> @@ -52,7 +52,7 @@ static ssize_t online_show(struct device *dev, { struct zcrypt_card *zc = dev_get_drvdata(dev); struct ap_card *ac = to_ap_card(dev); - int online = ac->config && zc->online ? 1 : 0; + int online = ac->config && !ac->chkstop && zc->online ? 1 : 0; return sysfs_emit(buf, "%d\n", online); } @@ -70,7 +70,7 @@ static ssize_t online_store(struct device *dev, if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; - if (online && !ac->config) + if (online && (!ac->config || ac->chkstop)) return -ENODEV; zc->online = online; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 263fe182648b..573bad1d6d86 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -7,10 +7,11 @@ * Collection of CCA misc functions used by zcrypt and pkey */ -#define KMSG_COMPONENT "zcrypt" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zcrypt: " fmt +#include <linux/export.h> #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/random.h> @@ -23,27 +24,37 @@ #include "zcrypt_msgtype6.h" #include "zcrypt_ccamisc.h" -#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) -#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) -#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) -#define DEBUG_ERR(...) 
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) - /* Size of parameter block used for all cca requests/replies */ #define PARMBSIZE 512 /* Size of vardata block used for some of the cca requests/replies */ #define VARDATASIZE 4096 -struct cca_info_list_entry { - struct list_head list; - u16 cardnr; - u16 domain; - struct cca_info info; -}; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used + * when alloc_and_prep_cprbmem() is called with the xflag + * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold + * space for request AND reply! + */ +#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024) +static mempool_t *cprb_mempool; -/* a list with cca_info_list_entry entries */ -static LIST_HEAD(cca_info_list); -static DEFINE_SPINLOCK(cca_info_list_lock); +/* + * This is a pre-allocated memory for the device status array + * used within the findcard() functions. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains to be are supported. + */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); /* * Simple check if the token is a valid CCA secure AES data key @@ -177,7 +188,7 @@ EXPORT_SYMBOL(cca_check_secaescipherkey); * key token. Returns 0 on success or errno value on failure. */ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, - const u8 *token, size_t keysize, + const u8 *token, u32 keysize, int checkcpacfexport) { struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; @@ -192,7 +203,7 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, } if (t->len > keysize) { if (dbg) - DBF("%s token check failed, len %d > keysize %zu\n", + DBF("%s token check failed, len %d > keysize %u\n", __func__, (int)t->len, keysize); return -EINVAL; } @@ -224,19 +235,27 @@ EXPORT_SYMBOL(cca_check_sececckeytoken); static int alloc_and_prep_cprbmem(size_t paramblen, u8 **p_cprb_mem, struct CPRBX **p_req_cprb, - struct CPRBX **p_rep_cprb) + struct CPRBX **p_rep_cprb, + u32 xflags) { - u8 *cprbmem; + u8 *cprbmem = NULL; size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + size_t len = 2 * cprbplusparamblen; struct CPRBX *preqcblk, *prepcblk; /* * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprbmem = mempool_alloc_preallocated(cprb_mempool); + } else { + cprbmem = kmalloc(len, GFP_KERNEL); + } if (!cprbmem) return -ENOMEM; + memset(cprbmem, 0, len); preqcblk = (struct CPRBX *)cprbmem; prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); @@ -266,11 +285,15 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * with zeros before freeing (useful if there was some * clear key material in there). 
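
Allocation and release are now a strict pair: with ZCRYPT_XFLAG_NOMEMALLOC set, the helper draws a preallocated item from the pool and fails outright if the request exceeds the item size, while the normal path still uses kmalloc(); the free side scrubs sensitive contents before returning the memory to whichever origin it came from. A condensed standalone sketch, with illustrative pool name and sizes:

    #include <linux/mempool.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    #define ITEM_SIZE (16 * 1024)
    static mempool_t *pool;    /* created at module init, illustrative */

    static void *buf_alloc(size_t len, bool nomemalloc)
    {
        if (nomemalloc)    /* kmalloc forbidden: preallocated item or bust */
            return len <= ITEM_SIZE ?
                mempool_alloc_preallocated(pool) : NULL;
        return kmalloc(len, GFP_KERNEL);
    }

    static void buf_free(void *p, size_t len, bool scrub, bool nomemalloc)
    {
        if (p && scrub)
            memzero_explicit(p, len);    /* wipe clear key material */
        if (nomemalloc)
            mempool_free(p, pool);
        else
            kfree(p);
    }
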
*/ -static void free_cprbmem(void *mem, size_t paramblen, int scrub) +static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags) { - if (scrub) + if (mem && scrub) memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); - kfree(mem); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); } /* @@ -295,7 +318,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, - u32 keybitsize, u8 *seckey) + u32 keybitsize, u8 *seckey, u32 xflags) { int i, rc, keysize; int seckeysize; @@ -337,7 +360,8 @@ int cca_genseckey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -367,8 +391,8 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); break; default: - DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -384,17 +408,17 @@ int cca_genseckey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", - __func__, + ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n", + __func__, (int)prepcblk->ccp_rtcode, (int)prepcblk->ccp_rscode); rc = -EIO; @@ -411,8 +435,8 @@ int cca_genseckey(u16 cardnr, u16 domain, - sizeof(prepparm->lv3.keyblock.toklen) - sizeof(prepparm->lv3.keyblock.tokattr); if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); + ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); rc = -EIO; goto out; } @@ -429,7 +453,7 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_genseckey); @@ -438,7 +462,7 @@ EXPORT_SYMBOL(cca_genseckey); * Generate an CCA AES DATA secure key with given key value. 
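
The new trailing xflags parameter is threaded from each public entry point down to the CPRB helpers and to zcrypt_send_cprb(). A hypothetical caller on a path that must not allocate memory (card and domain assumed to be in scope):

    u8 seckey[SECKEYBLOBSIZE];
    int rc;

    rc = cca_genseckey(card, domain, 256 /* key bits */, seckey,
                       ZCRYPT_XFLAG_NOMEMALLOC);
    if (rc)
        pr_warn("cca_genseckey failed, rc=%d\n", rc);
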
*/ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey) + const u8 *clrkey, u8 *seckey, u32 xflags) { int rc, keysize, seckeysize; u8 *mem, *ptr; @@ -478,7 +502,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -505,8 +530,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, keysize = 32; break; default: - DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -522,19 +547,19 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s clear key import failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -549,8 +574,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - sizeof(prepparm->lv3.keyblock.toklen) - sizeof(prepparm->lv3.keyblock.tokattr); if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); + ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); rc = -EIO; goto out; } @@ -568,7 +593,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 1); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_clr2seckey); @@ -578,7 +603,7 @@ EXPORT_SYMBOL(cca_clr2seckey); */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -624,7 +649,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -649,30 +675,30 @@ int cca_sec2protkey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode 
!= 0) { - DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) - rc = -EAGAIN; + rc = -EBUSY; else rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -683,8 +709,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, /* check the returned keyblock */ if (prepparm->lv3.ckb.version != 0x01 && prepparm->lv3.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int)prepparm->lv3.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n", + __func__, (int)prepparm->lv3.ckb.version); rc = -EIO; goto out; } @@ -707,8 +733,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keylen %d\n", - __func__, prepparm->lv3.ckb.len); + ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n", + __func__, prepparm->lv3.ckb.len); rc = -EIO; goto out; } @@ -717,7 +743,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeylen = prepparm->lv3.ckb.len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_sec2protkey); @@ -742,7 +768,7 @@ static const u8 aes_cipher_key_skeleton[] = { * Generate (random) CCA AES CIPHER secure key. 
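
The unwrap hunks converge on one response-code policy: return code 8 with reason code 2290 now maps to -EBUSY instead of -EAGAIN (the card or domain is busy and the caller may retry), any other non-zero return code becomes -EIO, and a non-zero reason code alone is only logged as a warning. Condensed:

    static int ccp_response_to_errno(int rtcode, int rscode)
    {
        if (rtcode == 0)
            return 0;    /* rscode != 0 alone is only worth a warning */
        if (rtcode == 8 && rscode == 2290)
            return -EBUSY;    /* busy, caller may retry */
        return -EIO;
    }
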
*/ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; u8 *mem, *ptr; @@ -818,7 +844,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, struct cipherkeytoken *t; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -840,9 +867,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, case 256: break; default: - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -878,21 +904,19 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s cipher key generate failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -905,8 +929,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { - DEBUG_ERR("%s reply with invalid or unknown key block\n", - __func__); + ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n", + __func__); rc = -EIO; goto out; } @@ -931,7 +955,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, *keybufsize = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_gencipherkey); @@ -946,7 +970,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, const u8 *clr_key_value, int clr_key_bit_size, u8 *key_token, - int *key_token_size) + int *key_token_size, + u32 xflags) { int rc, n; u8 *mem, *ptr; @@ -997,7 +1022,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 
0 : 1; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1046,21 +1072,19 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s CSNBKPI2 failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -1073,8 +1097,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { - DEBUG_ERR("%s reply with invalid or unknown key block\n", - __func__); + ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n", + __func__); rc = -EIO; goto out; } @@ -1087,7 +1111,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, *key_token_size = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } @@ -1095,23 +1119,31 @@ out: * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; - u8 *token; + void *mem; int tokensize; - u8 exorbuf[32]; + u8 *token, exorbuf[32]; struct cipherkeytoken *t; /* fill exorbuf with random data */ get_random_bytes(exorbuf, sizeof(exorbuf)); - /* allocate space for the key token to build */ - token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); - if (!token) + /* + * Allocate space for the key token to build. + * Also we only need up to MAXCCAVLSCTOKENSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; /* prepare the token with the key skeleton */ + token = (u8 *)mem; tokensize = SIZEOF_SKELETON; memcpy(token, aes_cipher_key_skeleton, tokensize); @@ -1130,35 +1162,31 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, * 4/4 COMPLETE the secure cipher key import */ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { - DEBUG_ERR( - "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - clrkey, keybitsize, token, &tokensize); + clrkey, keybitsize, token, &tokensize, xflags); if (rc) { - DEBUG_ERR( - "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { - DEBUG_ERR( - "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, - NULL, keybitsize, token, &tokensize); + NULL, keybitsize, token, &tokensize, xflags); if (rc) { - DEBUG_ERR( - "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } @@ -1172,7 +1200,7 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, *keybufsize = tokensize; out: - kfree(token); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(cca_clr2cipherkey); @@ -1181,7 +1209,8 @@ EXPORT_SYMBOL(cca_clr2cipherkey); * Derive proteced key from CCA AES cipher secure key. 
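
The four-step import in cca_clr2cipherkey() above leans on CCA combining key parts by XOR: feeding the random exorbuf as FIRST part, the clear key as an ADD-PART, and the same exorbuf as a second ADD-PART yields R ^ K ^ R == K inside the card, so the clear key never crosses the interface as one self-contained part. A plain-C illustration of the cancellation:

    #include <stddef.h>

    static void combine_part(unsigned char *acc, const unsigned char *part,
                             size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            acc[i] ^= part[i];    /* CCA-style key part accumulation */
    }

    /*
     * memset(acc, 0, n);
     * combine_part(acc, rnd, n);    FIRST:    acc == R
     * combine_part(acc, key, n);    ADD-PART: acc == R ^ K
     * combine_part(acc, rnd, n);    ADD-PART: acc == K
     */
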
*/ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; u8 *mem, *ptr; @@ -1233,7 +1262,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, int keytoklen = ((struct cipherkeytoken *)ckey)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1263,33 +1293,30 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) - rc = -EAGAIN; + rc = -EBUSY; else rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN( - "%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -1300,15 +1327,14 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x01 && prepparm->vud.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int)prepparm->vud.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n", + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x02) { - DEBUG_ERR( - "%s reply param keyblock algo mismatch 0x%02x != 0x02\n", - __func__, (int)prepparm->vud.ckb.algo); + ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n", + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } @@ -1331,8 +1357,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keylen %d\n", - __func__, prepparm->vud.ckb.keylen); + ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n", + __func__, prepparm->vud.ckb.keylen); rc = -EIO; goto out; } @@ -1341,7 +1367,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeylen = prepparm->vud.ckb.keylen; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_cipher2protkey); @@ -1350,7 +1376,7 @@ EXPORT_SYMBOL(cca_cipher2protkey); * Derive protected key from CCA ECC secure private key. 
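
Both unwrap paths end with the same translation from returned key length to protected-key type; condensed below (the 16- and 24-byte cases are abbreviated in the hunks above but follow the same shape):

    static int keylen_to_pkey_type(u32 keylen, u32 *protkeytype)
    {
        switch (keylen) {
        case 16:
            *protkeytype = PKEY_KEYTYPE_AES_128;
            return 0;
        case 24:
            *protkeytype = PKEY_KEYTYPE_AES_192;
            return 0;
        case 32:
            *protkeytype = PKEY_KEYTYPE_AES_256;
            return 0;
        default:
            return -EIO;    /* unknown/unsupported key length */
        }
    }
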
*/ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -1400,7 +1426,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int keylen = ((struct eccprivkeytoken *)key)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1430,33 +1457,30 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) - rc = -EAGAIN; + rc = -EBUSY; else rc = -EIO; goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN( - "%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -1466,23 +1490,22 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", - __func__, (int)prepparm->vud.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x81) { - DEBUG_ERR( - "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", - __func__, (int)prepparm->vud.ckb.algo); + ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n", + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } /* copy the translated protected key */ if (prepparm->vud.ckb.keylen > *protkeylen) { - DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n", - __func__, prepparm->vud.ckb.keylen, *protkeylen); + ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n", + __func__, prepparm->vud.ckb.keylen, *protkeylen); rc = -EIO; goto out; } @@ -1492,7 +1515,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, *protkeytype = PKEY_KEYTYPE_ECC; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_ecc2protkey); @@ -1503,7 +1526,8 @@ EXPORT_SYMBOL(cca_ecc2protkey); int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen) + u8 *varray, size_t *varraylen, + u32 xflags) { int rc; u16 len; @@ -1527,7 +1551,8 @@ int 
cca_query_crypto_facility(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(parmbsize, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1548,19 +1573,19 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -1595,94 +1620,21 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } out: - free_cprbmem(mem, parmbsize, 0); + free_cprbmem(mem, parmbsize, false, xflags); return rc; } EXPORT_SYMBOL(cca_query_crypto_facility); -static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) -{ - int rc = -ENOENT; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && ptr->domain == domain) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&cca_info_list_lock); - - return rc; -} - -static void cca_info_cache_update(u16 cardnr, u16 domain, - const struct cca_info *ci) -{ - int found = 0; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&cca_info_list_lock); - return; - } - ptr->cardnr = cardnr; - ptr->domain = domain; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &cca_info_list); - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void cca_info_cache_scrub(u16 cardnr, u16 domain) -{ - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void __exit mkvp_cache_free(void) -{ - struct cca_info_list_entry *ptr, *pnext; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&cca_info_list_lock); -} - /* - * Fetch cca_info values via query_crypto_facility from adapter. + * Fetch cca_info values about a CCA queue via + * query_crypto_facility from adapter. 
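
cca_get_info() below drives this primitive with one scratch buffer split in half: rule array in the front, var array in the back. A hypothetical standalone caller using a plain page instead of the driver's mempool:

    u8 *buf;
    size_t rlen, vlen;
    int rc;

    buf = (u8 *)__get_free_page(GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    rlen = vlen = PAGE_SIZE / 2;
    rc = cca_query_crypto_facility(card, dom, "STATICSA",
                                   buf, &rlen,
                                   buf + PAGE_SIZE / 2, &vlen,
                                   0 /* xflags */);
    free_page((unsigned long)buf);
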
*/ -static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) +int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags) { + void *mem; int rc, found = 0; size_t rlen, vlen; - u8 *rarray, *varray, *pg; + u8 *rarray, *varray; struct zcrypt_device_status_ext devstat; memset(ci, 0, sizeof(*ci)); @@ -1693,17 +1645,22 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) return rc; ci->hwtype = devstat.hwtype; - /* prep page for rule array and var array use */ - pg = (u8 *)__get_free_page(GFP_KERNEL); - if (!pg) + /* + * Prep memory for rule array and var array use. + * Use the cprb mempool for this. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; - rarray = pg; - varray = pg + PAGE_SIZE / 2; + rarray = (u8 *)mem; + varray = (u8 *)mem + PAGE_SIZE / 2; rlen = vlen = PAGE_SIZE / 2; /* QF for this card/domain */ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); ci->new_asym_mk_state = (char)rarray[4 * 8]; @@ -1730,7 +1687,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) goto out; rlen = vlen = PAGE_SIZE / 2; rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { ci->new_apka_mk_state = (char)rarray[10 * 8]; ci->cur_apka_mk_state = (char)rarray[11 * 8]; @@ -1745,177 +1702,32 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } out: - free_page((unsigned long)pg); + mempool_free(mem, cprb_mempool); return found == 2 ? 0 : -ENOENT; } - -/* - * Fetch cca information about a CCA queue. - */ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) -{ - int rc; - - rc = cca_info_cache_fetch(card, dom, ci); - if (rc || verify) { - rc = fetch_cca_info(card, dom, ci); - if (rc == 0) - cca_info_cache_update(card, dom, ci); - } - - return rc; -} EXPORT_SYMBOL(cca_get_info); -/* - * Search for a matching crypto card based on the - * Master Key Verification Pattern given. 
- */ -static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, - int verify, int minhwtype) -{ - struct zcrypt_device_status_ext *device_status; - u16 card, dom; - struct cca_info ci; - int i, rc, oi = -1; - - /* mkvp must not be zero, minhwtype needs to be >= 0 */ - if (mkvp == 0 || minhwtype < 0) - return -EINVAL; - - /* fetch status of all crypto cards */ - device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); - - /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - if (device_status[i].online && - device_status[i].functions & 0x04) { - /* enabled CCA card, check current mkvp from cache */ - if (cca_info_cache_fetch(card, dom, &ci) == 0 && - ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) { - if (!verify) - break; - /* verify: refresh card info */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - } - } - } else { - /* Card is offline and/or not a CCA card. */ - /* del mkvp entry from cache if it exists */ - cca_info_cache_scrub(card, dom); - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT) { - /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - if (!(device_status[i].online && - device_status[i].functions & 0x04)) - continue; - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* fresh fetch mkvp from adapter */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - if (ci.hwtype >= minhwtype && - ci.old_aes_mk_state == '2' && - ci.old_aes_mkvp == mkvp && - oi < 0) - oi = i; - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { - /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_status[oi].qid); - dom = AP_QID_QUEUE(device_status[oi].qid); - } - } - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { - if (pcardnr) - *pcardnr = card; - if (pdomain) - *pdomain = dom; - rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); - } else { - rc = -ENODEV; - } - - kvfree(device_status); - return rc; -} - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key token. 
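
For readers tracking the behavior change: the deleted findcard() preferred an apqn whose CURRENT AES master key verification pattern matched and only fell back to the first OLD-pattern match (the oi index) when no current match existed, which is what its 0-versus-1 return value encoded. The skeleton of that preference, with hypothetical predicate helpers:

    int best = -1, old_best = -1, i;

    for (i = 0; i < nentries; i++) {
        if (!usable_cca_apqn(&st[i]))
            continue;
        if (cur_mkvp_matches(&st[i], mkvp)) {
            best = i;    /* current-MKVP match wins immediately */
            break;
        }
        if (old_best < 0 && old_mkvp_matches(&st[i], mkvp))
            old_best = i;    /* remember first old-MKVP match */
    }
    if (best < 0)
        best = old_best;    /* the "return 1" case of the old code */
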
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) -{ - u64 mkvp; - int minhwtype = 0; - const struct keytoken_header *hdr = (struct keytoken_header *)key; - - if (hdr->type != TOKTYPE_CCA_INTERNAL) - return -EINVAL; - - switch (hdr->version) { - case TOKVER_CCA_AES: - mkvp = ((struct secaeskeytoken *)key)->mkvp; - break; - case TOKVER_CCA_VLSC: - mkvp = ((struct cipherkeytoken *)key)->mkvp0; - minhwtype = AP_DEVICE_TYPE_CEX6; - break; - default: - return -EINVAL; - } - - return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); -} -EXPORT_SYMBOL(cca_findcard); - -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify) + u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, curmatch, oldmatch, rc = 0; + int i, card, dom, curmatch, oldmatch; struct cca_info ci; + u32 _nr_apqns = 0; - /* fetch status of all crypto cards */ - device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1931,7 +1743,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, if (domain != 0xFFFF && dom != domain) continue; /* get cca info on this apqn */ - if (cca_get_info(card, dom, &ci, verify)) + if (cca_get_info(card, dom, &ci, xflags)) continue; /* current master key needs to be valid */ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') @@ -1961,27 +1773,41 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + /* release the device status memory */ + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(cca_findcard2); -void __exit zcrypt_ccamisc_exit(void) +int __init zcrypt_ccamisc_init(void) +{ + /* Pre-allocate a small memory pool for cca cprbs. 
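
The init function being opened here creates that pool; for reference, a minimal create/destroy lifecycle of a kmalloc-backed mempool (the item count is a driver-global tunable, zcrypt_mempool_threshold, in this code; the values here are illustrative):

    #include <linux/mempool.h>

    static mempool_t *pool;

    static int __init pool_init(void)
    {
        pool = mempool_create_kmalloc_pool(5 /* min items */, 16 * 1024);
        return pool ? 0 : -ENOMEM;
    }

    static void pool_exit(void)
    {
        mempool_destroy(pool);    /* releases the preallocated items */
    }
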
*/ + cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in findcard() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ccamisc_exit(void) { - mkvp_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 5ddf02f965f9..1ecc4e37e9ad 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -12,6 +12,7 @@ #include <asm/zcrypt.h> #include <asm/pkey.h> +#include "zcrypt_api.h" /* Key token types */ #define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ @@ -153,50 +154,53 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, * key token. Returns 0 on success or errno value on failure. */ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, - const u8 *token, size_t keysize, + const u8 *token, u32 keysize, int checkcpacfexport); /* * Generate (random) CCA AES DATA secure key. */ -int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); +int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey, + u32 xflags); /* * Generate CCA AES DATA secure key with given clear key value. */ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey); + const u8 *clrkey, u8 *seckey, u32 xflags); /* * Derive proteced key from an CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); /* * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize); + u8 *keybuf, u32 *keybufsize, u32 xflags); /* * Derive proteced key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); /* * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 xflags); /* * Derive proteced key from CCA ECC secure private key. */ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags); /* * Query cryptographic facility from CCA adapter @@ -204,16 +208,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen); - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key. - * Works with CCA AES data and cipher keys. - * Returns < 0 on failure, 0 if CURRENT MKVP matches and - * 1 if OLD MKVP matches. 
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); + u8 *varray, size_t *varraylen, + u32 xflags); /* * Build a list of cca apqns meeting the following constrains: @@ -223,21 +219,16 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * - if minhwtype > 0 only apqns with hwtype >= minhwtype * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp * - if old_mkvp != 0 only apqns where old_mkvp == mkvp - * - if verify is enabled and a cur_mkvp and/or old_mkvp - * value is given, then refetch the cca_info and make sure the current - * cur_mkvp or old_mkvp values of the apqn are used. * The mktype determines which set of master keys to use: * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify); + u32 xflags); #define AES_MK_SET 0 #define APKA_MK_SET 1 @@ -269,8 +260,9 @@ struct cca_info { /* * Fetch cca information about an CCA queue. 
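
With the in/out contract documented above, a hypothetical caller of the reworked cca_findcard2() now owns the apqn array and announces its capacity up front:

    u32 apqns[64], nr_apqns = ARRAY_SIZE(apqns);
    int rc;

    rc = cca_findcard2(apqns, &nr_apqns,
                       0xFFFF /* any card */, 0xFFFF /* any domain */,
                       0 /* any hwtype */, AES_MK_SET,
                       mkvp /* required current MKVP */,
                       0 /* old MKVP: don't care */,
                       0 /* xflags */);
    if (rc == 0)
        pr_info("found %u matching apqns\n", nr_apqns);
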
*/ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags); +int zcrypt_ccamisc_init(void); void zcrypt_ccamisc_exit(void); #endif /* _ZCRYPT_CCAMISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 9cfce9ff2e65..6ba7fbddd3f7 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -79,14 +79,13 @@ static ssize_t cca_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct cca_info ci; struct ap_card *ac = to_ap_card(dev); + struct cca_info ci; memset(&ci, 0, sizeof(ci)); if (ap_domain_index >= 0) - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); + cca_get_info(ac->id, ap_domain_index, &ci, 0); return sysfs_emit(buf, "%s\n", ci.serial); } @@ -110,17 +109,17 @@ static ssize_t cca_mkvps_show(struct device *dev, struct device_attribute *attr, char *buf) { + static const char * const new_state[] = { "empty", "partial", "full" }; + static const char * const cao_state[] = { "invalid", "valid" }; struct zcrypt_queue *zq = dev_get_drvdata(dev); - int n = 0; struct cca_info ci; - static const char * const cao_state[] = { "invalid", "valid" }; - static const char * const new_state[] = { "empty", "partial", "full" }; + int n = 0; memset(&ci, 0, sizeof(ci)); cca_get_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &ci, zq->online); + &ci, 0); if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n", @@ -210,13 +209,12 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.API_ord_nr > 0) return sysfs_emit(buf, "%u\n", ci.API_ord_nr); @@ -231,13 +229,12 @@ static ssize_t ep11_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.FW_version > 0) return sysfs_emit(buf, "%d.%d\n", @@ -254,13 +251,12 @@ static ssize_t ep11_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.serial[0]) return sysfs_emit(buf, "%16.16s\n", ci.serial); @@ -279,7 +275,11 @@ static const struct { { 1, "BSI2009" }, { 2, "FIPS2011" }, { 3, "BSI2011" }, + { 4, "SIGG-IMPORT" }, + { 5, "SIGG" }, { 6, "BSICC2017" }, + { 7, "FIPS2021" }, + { 8, "FIPS2024" }, { 0, NULL } }; @@ -287,14 +287,13 @@ static ssize_t ep11_card_op_modes_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - int i, n = 0; - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; + int i, n = 0; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); 
+ ep11_get_card_info(ac->id, &ci, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { @@ -344,7 +343,7 @@ static ssize_t ep11_mkvps_show(struct device *dev, if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); if (di.cur_wk_state == '0') { n = sysfs_emit(buf, "WK CUR: %s -\n", @@ -391,7 +390,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev, if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { @@ -473,7 +472,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) return -ENOMEM; zc->card = ac; dev_set_drvdata(&ap_dev->device, zc); - if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) { + if (ac->hwinfo.accel) { if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4A"; zc->user_space_type = ZCRYPT_CEX4; @@ -502,8 +501,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX6; } zc->min_mod_size = CEX4A_MIN_MOD_SIZE; - if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && - ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { + if (ac->hwinfo.mex4k && ac->hwinfo.crt4k) { zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K; zc->max_exp_bit_length = CEX4A_MAX_MOD_SIZE_4K; @@ -512,7 +510,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->max_exp_bit_length = CEX4A_MAX_MOD_SIZE_2K; } - } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { + } else if (ac->hwinfo.cca) { if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4C"; zc->speed_rating = CEX4C_SPEED_IDX; @@ -552,7 +550,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; - } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { + } else if (ac->hwinfo.ep11) { if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4P"; zc->user_space_type = ZCRYPT_CEX4; @@ -595,14 +593,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) return rc; } - if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { + if (ac->hwinfo.cca) { rc = sysfs_create_group(&ap_dev->device.kobj, &cca_card_attr_grp); if (rc) { zcrypt_card_unregister(zc); zcrypt_card_free(zc); } - } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { + } else if (ac->hwinfo.ep11) { rc = sysfs_create_group(&ap_dev->device.kobj, &ep11_card_attr_grp); if (rc) { @@ -623,9 +621,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device); struct ap_card *ac = to_ap_card(&ap_dev->device); - if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) + if (ac->hwinfo.cca) sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp); - else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) + else if (ac->hwinfo.ep11) sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp); zcrypt_card_unregister(zc); @@ -650,19 +648,19 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) struct zcrypt_queue *zq; int rc; - if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) { + if (aq->card->hwinfo.accel) { zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); - } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { + } 
else if (aq->card->hwinfo.cca) { zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT); - } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { + } else if (aq->card->hwinfo.ep11) { zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; @@ -685,14 +683,14 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) return rc; } - if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { + if (aq->card->hwinfo.cca) { rc = sysfs_create_group(&ap_dev->device.kobj, &cca_queue_attr_grp); if (rc) { zcrypt_queue_unregister(zq); zcrypt_queue_free(zq); } - } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { + } else if (aq->card->hwinfo.ep11) { rc = sysfs_create_group(&ap_dev->device.kobj, &ep11_queue_attr_grp); if (rc) { @@ -713,9 +711,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device); struct ap_queue *aq = to_ap_queue(&ap_dev->device); - if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) + if (aq->card->hwinfo.cca) sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp); - else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) + else if (aq->card->hwinfo.ep11) sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp); zcrypt_queue_unregister(zq); diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h index 5cf88aabd64b..9a208dc4c200 100644 --- a/drivers/s390/crypto/zcrypt_debug.h +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -17,7 +17,7 @@ #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) -#define DBF_MAX_SPRINTF_ARGS 6 +#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6 #define ZCRYPT_DBF(...) \ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) @@ -27,8 +27,6 @@ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__) #define ZCRYPT_DBF_INFO(...) \ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__) -#define ZCRYPT_DBF_DBG(...) \ - debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *zcrypt_dbf_info; diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 0a877f9792c2..3dda9589f2b9 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -6,13 +6,14 @@ * Collection of EP11 misc functions used by zcrypt and pkey */ -#define KMSG_COMPONENT "zcrypt" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zcrypt: " fmt +#include <linux/export.h> #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/random.h> +#include <linux/slab.h> #include <asm/zcrypt.h> #include <asm/pkey.h> #include <crypto/aes.h> @@ -24,96 +25,35 @@ #include "zcrypt_ep11misc.h" #include "zcrypt_ccamisc.h" -#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) -#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) -#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) -#define DEBUG_ERR(...) 
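
[Annotation] On the ap_test_bit() -> hwinfo conversions throughout zcrypt_cex4.c above: the TAPQ function facility bits are now exposed as named bitfields on the card's hwinfo. A sketch of the assumed shape; the real struct ap_tapq_hwinfo comes from the arch side (asm/ap.h), is not part of this hunk, and the field order here is illustrative only:

    struct ap_tapq_hwinfo_sketch {
        union {
            unsigned long long value;   /* raw TAPQ GR2 contents */
            struct {
                unsigned int accel : 1; /* accelerator facility */
                unsigned int cca   : 1; /* CCA coprocessor facility */
                unsigned int ep11  : 1; /* EP11 coprocessor facility */
                unsigned int mex4k : 1; /* 4k modexpo facility */
                unsigned int crt4k : 1; /* 4k crt facility */
                /* ... further facility/info bits ... */
            };
        };
    };

With that, ac->hwinfo.accel reads as a plain bit test and replaces the open-coded ap_test_bit(&ac->functions, AP_FUNC_ACCEL).
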
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) - #define EP11_PINBLOB_V1_BYTES 56 /* default iv used here */ static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; -/* ep11 card info cache */ -struct card_list_entry { - struct list_head list; - u16 cardnr; - struct ep11_card_info info; -}; -static LIST_HEAD(card_list); -static DEFINE_SPINLOCK(card_list_lock); - -static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) -{ - int rc = -ENOENT; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&card_list_lock); - - return rc; -} - -static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) -{ - int found = 0; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&card_list_lock); - return; - } - ptr->cardnr = cardnr; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &card_list); - } - spin_unlock_bh(&card_list_lock); -} - -static void card_cache_scrub(u16 cardnr) -{ - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&card_list_lock); -} - -static void __exit card_cache_free(void) -{ - struct card_list_entry *ptr, *pnext; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used when + * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC. + */ +#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024) +static mempool_t *cprb_mempool; - spin_lock_bh(&card_list_lock); - list_for_each_entry_safe(ptr, pnext, &card_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&card_list_lock); -} +/* + * This is a pre-allocated memory for the device status array + * used within the ep11_findcard2() function. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains to be are supported. + */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, struct ep11kblob_header **kbhdr, size_t *kbhdrsize, @@ -208,7 +148,7 @@ out: * For valid ep11 keyblobs, returns a reference to the wrappingkey verification * pattern. Otherwise NULL. */ -const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen) +const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen) { struct ep11keyblob *kb; @@ -222,7 +162,7 @@ EXPORT_SYMBOL(ep11_kb_wkvp); * Simple check if the key blob is a valid EP11 AES key blob with header. 
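
[Annotation] The cprb mempool and the preallocated device-status buffer introduced above replace the old per-card info cache: callers on memory-critical paths pass ZCRYPT_XFLAG_NOMEMALLOC and are then served exclusively from preallocated pool items, never from the page allocator. The allocation pattern, reduced to a sketch (helper names here are illustrative; the real ones are alloc_cprbmem()/free_cprbmem() further down, and the real allocator also zeroes the buffer):

    static void *get_buf(size_t len, u32 xflags)
    {
        if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
            return len <= CPRB_MEMPOOL_ITEM_SIZE ?
                mempool_alloc_preallocated(cprb_mempool) : NULL;
        return kmalloc(len, GFP_KERNEL);
    }

    static void put_buf(void *buf, u32 xflags)
    {
        if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
            mempool_free(buf, cprb_mempool);
        else
            kfree(buf);
    }
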
*/ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); @@ -230,7 +170,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*hdr) + sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } @@ -255,7 +195,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, } if (hdr->len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)hdr->len, keylen); return -EINVAL; } @@ -289,7 +229,7 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr); * Simple check if the key blob is a valid EP11 ECC key blob with header. */ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); @@ -297,7 +237,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*hdr) + sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } @@ -322,7 +262,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, } if (hdr->len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)hdr->len, keylen); return -EINVAL; } @@ -357,14 +297,14 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr); * the header in the session field (old style EP11 AES key). */ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11keyblob *kb = (struct ep11keyblob *)key; #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*kb)); return -EINVAL; } @@ -383,7 +323,7 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, } if (kb->head.len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)kb->head.len, keylen); return -EINVAL; } @@ -416,14 +356,20 @@ EXPORT_SYMBOL(ep11_check_aes_key); /* * Allocate and prepare ep11 cprb plus additional payload. 
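
[Annotation] The three exported checkers validate, in order: minimum buffer size, blob version/type fields, and that the length recorded in the blob header does not exceed the buffer handed in (hdr->len > keylen is rejected). A hypothetical caller probing a blob of unknown flavor; passing dbg = NULL simply drops the debug entries, since the s390 debug_sprintf_event() macro tolerates a NULL debug info:

    static int probe_ep11_blob(const u8 *key, u32 keylen)
    {
        if (!ep11_check_aes_key_with_hdr(NULL, 0, key, keylen, 1))
            return 0;   /* AES blob with header */
        if (!ep11_check_ecc_key_with_hdr(NULL, 0, key, keylen, 1))
            return 0;   /* ECC blob with header */
        /* old style AES blob, header lives in the session field */
        return ep11_check_aes_key(NULL, 0, key, keylen, 1);
    }
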
*/ -static inline struct ep11_cprb *alloc_cprb(size_t payload_len) +static void *alloc_cprbmem(size_t payload_len, u32 xflags) { size_t len = sizeof(struct ep11_cprb) + payload_len; - struct ep11_cprb *cprb; + struct ep11_cprb *cprb = NULL; - cprb = kzalloc(len, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprb = mempool_alloc_preallocated(cprb_mempool); + } else { + cprb = kmalloc(len, GFP_KERNEL); + } if (!cprb) return NULL; + memset(cprb, 0, len); cprb->cprb_len = sizeof(struct ep11_cprb); cprb->cprb_ver_id = 0x04; @@ -435,6 +381,20 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len) } /* + * Free ep11 cprb buffer space. + */ +static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags) +{ + if (mem && scrub) + memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); +} + +/* * Some helper functions related to ASN1 encoding. * Limited to length info <= 2 byte. */ @@ -494,6 +454,7 @@ static inline void prep_urb(struct ep11_urb *u, struct ep11_cprb *req, size_t req_len, struct ep11_cprb *rep, size_t rep_len) { + memset(u, 0, sizeof(*u)); u->targets = (u8 __user *)t; u->targets_num = nt; u->req = (u8 __user *)req; @@ -510,7 +471,7 @@ static int check_reply_pl(const u8 *pl, const char *func) /* start tag */ if (*pl++ != 0x30) { - DEBUG_ERR("%s reply start tag mismatch\n", func); + ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func); return -EIO; } @@ -527,51 +488,68 @@ static int check_reply_pl(const u8 *pl, const char *func) len = *((u16 *)pl); pl += 2; } else { - DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n", - func, *pl); + ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n", + func, *pl); return -EIO; } /* len should cover at least 3 fields with 32 bit value each */ if (len < 3 * 6) { - DEBUG_ERR("%s reply length %d too small\n", func, len); + ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len); return -EIO; } /* function tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s function tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func); return -EIO; } pl += 6; /* dom tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s dom tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func); return -EIO; } pl += 6; /* return value tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s return value tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n", + func); return -EIO; } pl += 2; ret = *((u32 *)pl); if (ret != 0) { - DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret); + ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret); return -EIO; } return 0; } +/* Check ep11 reply cprb, return 0 or suggested errno value. */ +static int check_reply_cprb(const struct ep11_cprb *rep, const char *func) +{ + /* check ep11 reply return code field */ + if (rep->ret_code) { + ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", __func__, + rep->ret_code); + if (rep->ret_code == 0x000c0003) + return -EBUSY; + else + return -EIO; + } + + return 0; +} + /* * Helper function which does an ep11 query with given query type. 
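
[Annotation] check_reply_pl() above walks a DER-encoded reply payload; the length forms relevant here are the short form (one byte < 0x80) and the long forms 0x81 (one length byte) and 0x82 (two length bytes, big-endian, which is why the direct u16 load is correct on s390). The rule as a standalone sketch:

    /* returns 0 and advances *p past tag + length bytes */
    static int der_len(const unsigned char **p, unsigned int *len)
    {
        const unsigned char *pl = *p + 1;   /* skip the tag byte */

        if (*pl < 0x80) {
            *len = *pl++;
        } else if (*pl == 0x81) {
            pl++;
            *len = *pl++;
        } else if (*pl == 0x82) {
            pl++;
            *len = (pl[0] << 8) | pl[1];    /* big-endian u16 */
            pl += 2;
        } else {
            return -1;  /* longer forms not used in EP11 replies */
        }
        *p = pl;
        return 0;
    }
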
*/ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, - size_t buflen, u8 *buf) + size_t buflen, u8 *buf, u32 xflags) { struct ep11_info_req_pl { struct pl_head head; @@ -593,11 +571,11 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api = EP11_API_V1, rc = -ENOMEM; /* request cprb and payload */ - req = alloc_cprb(sizeof(struct ep11_info_req_pl)); + req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags); if (!req) goto out; req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -609,40 +587,42 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req_pl->query_subtype_len = sizeof(u32); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); + rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags); if (!rep) goto out; rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = cardnr; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } + /* check ep11 reply cprb */ + rc = check_reply_cprb(rep, __func__); + if (rc) + goto out; + + /* check payload */ rc = check_reply_pl((u8 *)rep_pl, __func__); if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > buflen) { - DEBUG_ERR("%s mismatch between reply data len and buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -650,16 +630,15 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, 0, false, xflags); return rc; } /* * Provide information about an EP11 card. 
*/ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags) { int rc; struct ep11_module_query_info { @@ -689,30 +668,26 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) u32 max_CP_index; } __packed * pmqi = NULL; - rc = card_cache_fetch(card, info); - if (rc || verify) { - pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); - if (!pmqi) - return -ENOMEM; - rc = ep11_query_info(card, AUTOSEL_DOM, - 0x01 /* module info query */, - sizeof(*pmqi), (u8 *)pmqi); - if (rc) { - if (rc == -ENODEV) - card_cache_scrub(card); - goto out; - } - memset(info, 0, sizeof(*info)); - info->API_ord_nr = pmqi->API_ord_nr; - info->FW_version = - (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; - memcpy(info->serial, pmqi->serial, sizeof(info->serial)); - info->op_mode = pmqi->op_mode; - card_cache_update(card, info); - } + /* use the cprb mempool to satisfy this short term mem alloc */ + pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!pmqi) + return -ENOMEM; + rc = ep11_query_info(card, AUTOSEL_DOM, + 0x01 /* module info query */, + sizeof(*pmqi), (u8 *)pmqi, xflags); + if (rc) + goto out; + + memset(info, 0, sizeof(*info)); + info->API_ord_nr = pmqi->API_ord_nr; + info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); + info->op_mode = pmqi->op_mode; out: - kfree(pmqi); + mempool_free(pmqi, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_get_card_info); @@ -720,7 +695,8 @@ EXPORT_SYMBOL(ep11_get_card_info); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags) { int rc; struct ep11_domain_query_info { @@ -729,36 +705,32 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) u8 new_WK_VP[32]; u32 dom_flags; u64 op_mode; - } __packed * p_dom_info; - - p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); - if (!p_dom_info) - return -ENOMEM; + } __packed dom_query_info; rc = ep11_query_info(card, domain, 0x03 /* domain info query */, - sizeof(*p_dom_info), (u8 *)p_dom_info); + sizeof(dom_query_info), (u8 *)&dom_query_info, + xflags); if (rc) goto out; memset(info, 0, sizeof(*info)); info->cur_wk_state = '0'; info->new_wk_state = '0'; - if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { - if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { + if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) { + if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) { info->cur_wk_state = '1'; - memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); + memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32); } - if (p_dom_info->dom_flags & 0x04 || /* new wk present */ - p_dom_info->dom_flags & 0x08 /* new wk committed */) { + if (dom_query_info.dom_flags & 0x04 || /* new wk present */ + dom_query_info.dom_flags & 0x08 /* new wk committed */) { info->new_wk_state = - p_dom_info->dom_flags & 0x08 ? '2' : '1'; - memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); + dom_query_info.dom_flags & 0x08 ? 
'2' : '1'; + memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32); } } - info->op_mode = p_dom_info->op_mode; + info->op_mode = dom_query_info.op_mode; out: - kfree(p_dom_info); return rc; } EXPORT_SYMBOL(ep11_get_domain_info); @@ -771,7 +743,7 @@ EXPORT_SYMBOL(ep11_get_domain_info); static int _ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct keygen_req_pl { struct pl_head head; @@ -806,7 +778,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -816,9 +788,8 @@ static int _ep11_genaeskey(u16 card, u16 domain, case 256: break; default: - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -835,7 +806,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, pinblob_size = EP11_PINBLOB_V1_BYTES; } req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -861,40 +832,42 @@ static int _ep11_genaeskey(u16 card, u16 domain, *p++ = pinblob_size; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct keygen_rep_pl)); + rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } + /* check ep11 reply cprb */ + rc = check_reply_cprb(rep, __func__); + if (rc) + goto out; + + /* check payload */ rc = check_reply_pl((u8 *)rep_pl, __func__); if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > *keybufsize) { - DEBUG_ERR("%s mismatch reply data len / key buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -904,14 +877,13 @@ static int _ep11_genaeskey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags); return rc; } int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, u32 keybufver) + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -932,7 +904,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return rc; rc = _ep11_genaeskey(card, domain, keybitsize, 
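
[Annotation] The dom_flags decode above maps hardware flag bits to the one-character wrapping key states that ep11_mkvps_show() earlier in this patch turns into "WK CUR"/"WK NEW" strings ('0' = invalid/empty, '1' = valid resp. uncommitted, '2' = committed). As a standalone sketch, bit meanings taken from the comments in the hunk:

    static void decode_wk_states(unsigned int flags, char *cur, char *new_st)
    {
        *cur = *new_st = '0';
        if (!(flags & 0x10))        /* has not left imprint mode yet */
            return;
        if (flags & 0x02)           /* current WK valid */
            *cur = '1';
        if (flags & (0x04 | 0x08))  /* new WK present or committed */
            *new_st = (flags & 0x08) ? '2' : '1';
    }
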
keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -952,7 +924,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, u16 mode, u32 mech, const u8 *iv, const u8 *key, size_t keysize, const u8 *inbuf, size_t inbufsize, - u8 *outbuf, size_t *outbufsize) + u8 *outbuf, size_t *outbufsize, + u32 xflags) { struct crypt_req_pl { struct pl_head head; @@ -979,8 +952,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; - size_t req_pl_size, rep_pl_size; + struct ep11_urb urb; + size_t req_pl_size, rep_pl_size = 0; int n, api = EP11_API_V1, rc = -ENOMEM; u8 *p; @@ -991,7 +964,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0) + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1013,34 +986,36 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* reply cprb and payload, assume out data size <= in data size + 32 */ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); - rep = alloc_cprb(rep_pl_size); + rep = alloc_cprbmem(rep_pl_size, xflags); if (!rep) goto out; rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + rep_pl_size); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } + /* check ep11 reply cprb */ + rc = check_reply_cprb(rep, __func__); + if (rc) + goto out; + + /* check payload */ rc = check_reply_pl((u8 *)rep_pl, __func__); if (rc) goto out; if (rep_pl->data_tag != 0x04) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } @@ -1053,14 +1028,14 @@ static int ep11_cryptsingle(u16 card, u16 domain, n = *((u16 *)p); p += 2; } else { - DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", - __func__, rep_pl->data_lenfmt); + ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n", + __func__, rep_pl->data_lenfmt); rc = -EIO; goto out; } if (n > *outbufsize) { - DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n", - __func__, n, *outbufsize); + ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n", + __func__, n, *outbufsize); rc = -ENOSPC; goto out; } @@ -1069,9 +1044,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, *outbufsize = n; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, rep_pl_size, true, xflags); return rc; } @@ -1080,7 +1054,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct uw_req_pl { struct pl_head head; @@ -1117,7 +1091,7 @@ static int 
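
[Annotation] Request payload sizes throughout these builders are computed with ASN1TAGLEN(). Its definition sits earlier in this file, outside this hunk, and matches the "length info <= 2 byte" limit noted in the ASN1 helper comment; it sizes a complete DER TLV, i.e. one tag byte, the length byte(s), and the value:

    #define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
    /* ASN1TAGLEN(32)  == 34   tag + 1 length byte + 32 value bytes      */
    /* ASN1TAGLEN(300) == 304  tag + 0x82 + 2 length bytes + 300 bytes   */
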
_ep11_unwrapkey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -1135,7 +1109,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0) + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1171,40 +1145,42 @@ static int _ep11_unwrapkey(u16 card, u16 domain, p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct uw_rep_pl)); + rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } + /* check ep11 reply cprb */ + rc = check_reply_cprb(rep, __func__); + if (rc) + goto out; + + /* check payload */ rc = check_reply_pl((u8 *)rep_pl, __func__); if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > *keybufsize) { - DEBUG_ERR("%s mismatch reply data len / key buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -1214,9 +1190,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags); return rc; } @@ -1225,8 +1200,8 @@ static int ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, - u8 keybufver) + u8 *keybuf, u32 *keybufsize, + u8 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -1240,7 +1215,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, mech, iv, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -1259,7 +1234,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, static int _ep11_wrapkey(u16 card, u16 domain, const u8 *key, size_t keysize, u32 mech, const u8 *iv, - u8 *databuf, size_t *datasize) + u8 *databuf, size_t *datasize, u32 xflags) { struct wk_req_pl { struct pl_head head; @@ -1288,7 +1263,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; size_t req_pl_size; int api, rc = -ENOMEM; u8 *p; @@ -1296,7 +1271,7 
@@ static int _ep11_wrapkey(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0) + ASN1TAGLEN(keysize) + 4; - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; if (!mech || mech == 0x80060001) @@ -1326,40 +1301,42 @@ static int _ep11_wrapkey(u16 card, u16 domain, *p++ = 0; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct wk_rep_pl)); + rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } + /* check ep11 reply cprb */ + rc = check_reply_cprb(rep, __func__); + if (rc) + goto out; + + /* check payload */ rc = check_reply_pl((u8 *)rep_pl, __func__); if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > *datasize) { - DEBUG_ERR("%s mismatch reply data len / data buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -1369,78 +1346,83 @@ static int _ep11_wrapkey(u16 card, u16 domain, *datasize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags); return rc; } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, - u32 keytype) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 keytype, u32 xflags) { int rc; - u8 encbuf[64], *kek = NULL; + void *mem; + u8 encbuf[64], *kek; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { clrkeylen = keybitsize / 8; } else { - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); return -EINVAL; } - /* allocate memory for the temp kek */ + /* + * Allocate space for the temp kek. + * Also we only need up to MAXEP11AESKEYBLOBSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) + return -ENOMEM; + kek = (u8 *)mem; keklen = MAXEP11AESKEYBLOBSIZE; - kek = kmalloc(keklen, GFP_ATOMIC); - if (!kek) { - rc = -ENOMEM; - goto out; - } /* Step 1: generate AES 256 bit random kek key */ rc = _ep11_genaeskey(card, domain, 256, 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ - kek, &keklen); + kek, &keklen, xflags); if (rc) { - DEBUG_ERR( - "%s generate kek key failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", + __func__, rc); goto out; } /* Step 2: encrypt clear key value with the kek key */ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, - clrkey, clrkeylen, encbuf, &encbuflen); + clrkey, clrkeylen, encbuf, &encbuflen, xflags); if (rc) { - DEBUG_ERR( - "%s encrypting key value with kek key failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", + __func__, rc); goto out; } /* Step 3: import the encrypted key value as a new key */ rc = ep11_unwrapkey(card, domain, kek, keklen, encbuf, encbuflen, 0, def_iv, - keybitsize, 0, keybuf, keybufsize, keytype); + keybitsize, keygenflags, + keybuf, keybufsize, + keytype, xflags); if (rc) { - DEBUG_ERR( - "%s importing key value as new key failed,, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n", + __func__, rc); goto out; } out: - kfree(kek); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_clr2keyblob); int ep11_kblob2protkey(u16 card, u16 dom, - const u8 *keyblob, size_t keybloblen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + const u8 *keyblob, u32 keybloblen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { struct ep11kblob_header *hdr; struct ep11keyblob *key; @@ -1466,27 +1448,40 @@ int ep11_kblob2protkey(u16 card, u16 dom, } /* !!! hdr is no longer a valid header !!! */ - /* alloc temp working buffer */ + /* need a temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); - wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); - if (!wkbuf) - return -ENOMEM; + if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) { + /* this should never happen */ + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n", + __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc); + return rc; + } + /* use the cprb mempool to satisfy this short term mem allocation */ + wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_ATOMIC); + if (!wkbuf) { + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n", + __func__, rc); + return rc; + } /* ep11 secure key -> protected key + info */ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, - 0, def_iv, wkbuf, &wkbuflen); + 0, def_iv, wkbuf, &wkbuflen, xflags); if (rc) { - DEBUG_ERR( - "%s rewrapping ep11 key to pkey failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", + __func__, rc); goto out; } wki = (struct wk_info *)wkbuf; /* check struct version and pkey type */ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { - DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", - __func__, (int)wki->version, (int)wki->pkeytype); + ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n", + __func__, (int)wki->version, (int)wki->pkeytype); rc = -EIO; goto out; } @@ -1511,8 +1506,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", - __func__, (int)wki->pkeysize); + ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n", + __func__, (int)wki->pkeysize); rc = -EIO; goto out; } @@ -1525,16 +1520,16 @@ int ep11_kblob2protkey(u16 card, u16 dom, break; case 2: /* TDES */ default: - DEBUG_ERR("%s unknown/unsupported key type %d\n", - __func__, (int)wki->pkeytype); + ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n", + __func__, (int)wki->pkeytype); rc = -EIO; goto out; } /* copy the translated protected key */ if (wki->pkeysize > *protkeylen) { - DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n", - __func__, wki->pkeysize, *protkeylen); + ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n", + __func__, wki->pkeysize, *protkeylen); rc = -EINVAL; goto out; } @@ -1542,37 +1537,32 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeylen = wki->pkeysize; out: - kfree(wkbuf); + mempool_free(wkbuf, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_kblob2protkey); -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp) +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, rc = -ENOMEM; struct ep11_domain_info edi; struct ep11_card_info eci; + u32 _nr_apqns = 0; + int i, card, dom; - /* fetch status of all crypto cards */ - device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = 
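
[Annotation] Worked example for the wkbuflen computation above, with AES_BLOCK_SIZE == 16: the expression rounds keylen up to a block-size multiple and always leaves at least one spare block for the wrap output, so it stays comfortably under CPRB_MEMPOOL_ITEM_SIZE for sane key blob lengths:

    static unsigned int wkbuflen(unsigned int keylen)
    {
        return (keylen + 16) & ~15u;   /* 64 -> 80, 70 -> 80, 80 -> 96 */
    }
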
AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1592,14 +1582,14 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; /* check min api version if given */ if (minapi > 0) { - if (ep11_get_card_info(card, &eci, 0)) + if (ep11_get_card_info(card, &eci, xflags)) continue; if (minapi > eci.API_ord_nr) continue; } /* check wkvp if given */ if (wkvp) { - if (ep11_get_domain_info(card, dom, &edi)) + if (ep11_get_domain_info(card, dom, &edi, xflags)) continue; if (edi.cur_wk_state != '1') continue; @@ -1607,27 +1597,40 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(ep11_findcard2); -void __exit zcrypt_ep11misc_exit(void) +int __init zcrypt_ep11misc_init(void) +{ + /* Pre-allocate a small memory pool for ep11 cprbs. */ + cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in ep11_findcard2() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ep11misc_exit(void) { - card_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 9d17fd5228a7..b5e6fd861815 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -54,7 +54,7 @@ static inline bool is_ep11_keyblob(const u8 *key) * For valid ep11 keyblobs, returns a reference to the wrappingkey verification * pattern. Otherwise NULL. */ -const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); +const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen); /* * Simple check if the key blob is a valid EP11 AES key blob with header. @@ -63,7 +63,7 @@ const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); * Returns 0 on success or errno value on failure. */ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* * Simple check if the key blob is a valid EP11 ECC key blob with header. @@ -72,7 +72,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, * Returns 0 on success or errno value on failure. */ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* * Simple check if the key blob is a valid EP11 AES key blob with @@ -82,7 +82,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, * Returns 0 on success or errno value on failure. 
*/ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* EP11 card info struct */ struct ep11_card_info { @@ -104,25 +104,26 @@ struct ep11_domain_info { /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags); /* * Generate (random) EP11 AES secure key. */ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, u32 keybufver); + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags); /* * Generate EP11 AES secure key with given clear key value. */ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, - u32 keytype); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 keytype, u32 xflags); /* * Build a list of ep11 apqns meeting the following constrains: @@ -136,22 +137,22 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, * key for this domain. When a wkvp is given there will always be a re-fetch * of the domain info for the potential apqn - so this triggers an request * reply to each apqn eligible. - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp); +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags); /* * Derive proteced key from EP11 key blob (AES and ECC keys). 
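
[Annotation] ep11_findcard2() thus changes calling convention, as the reworked doc comment above states: instead of returning a freshly kmalloc'ed array, it fills a caller-provided one and updates *nr_apqns in place. A hypothetical caller under the new convention; the array size is arbitrary and 0xFFFF is assumed here as the usual any-card/any-domain wildcard:

    u32 apqns[64], nr = ARRAY_SIZE(apqns), i;
    int rc;

    rc = ep11_findcard2(apqns, &nr, 0xFFFF, 0xFFFF,
                        0 /* any hwtype */, 0 /* any api */,
                        NULL /* no wkvp filter */, 0 /* xflags */);
    if (!rc)
        for (i = 0; i < nr; i++)   /* entry: 16 bit card | 16 bit domain */
            pr_info("apqn %02x.%04x\n",
                    apqns[i] >> 16, apqns[i] & 0xffff);
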
*/ -int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); +int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); +int zcrypt_ep11misc_init(void); void zcrypt_ep11misc_exit(void); #endif /* _ZCRYPT_EP11MISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index d36177e65a3d..46e27b43a8af 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -98,17 +98,30 @@ static inline int convert_error(struct zcrypt_queue *zq, case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */ case REP88_ERROR_KEY_TYPE: /* 0x34 */ /* RY indicates malformed request */ - ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n", - __func__, card, queue, ehdr->reply_code); + if (ehdr->reply_code == REP82_ERROR_FILTERED_BY_HYPERVISOR && + ehdr->type == TYPE86_RSP_CODE) { + struct { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + } __packed * head = reply->msg; + unsigned int apfs = *((u32 *)head->fmt2.apfs); + + ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x apfs=0x%x => rc=EINVAL\n", + __func__, card, queue, + ehdr->reply_code, apfs); + } else { + ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n", + __func__, card, queue, + ehdr->reply_code); + } return -EINVAL; case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */ case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */ /* - * Msg to wrong type or card/infrastructure failure. - * Trigger rescan of the ap bus, trigger retry request. + * Msg to wrong type or card/infrastructure failure. Return + * EAGAIN, the upper layer may do a retry on the request. */ - atomic_set(&zcrypt_rescan_req, 1); /* For type 86 response show the apfs value (failure reason) */ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL && ehdr->type == TYPE86_RSP_CODE) { diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 2e155de8abe5..d6fc2d8e7fad 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -10,8 +10,7 @@ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> */ -#define KMSG_COMPONENT "zcrypt" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zcrypt: " fmt #include <linux/module.h> #include <linux/slab.h> @@ -427,7 +426,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, len = t80h->len; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -438,7 +437,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, msg->len = sizeof(error_reply); } out: - complete((struct completion *)msg->private); + complete(&msg->response.work); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -449,30 +448,30 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @mex: pointer to the modexpo request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
*/ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -485,11 +484,10 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) - ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -499,30 +497,30 @@ out: * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @crt: pointer to the modexpoc_crt request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -535,11 +533,10 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) - ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 3c53abbdc342..a0dcab5dc4f2 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -10,8 +10,7 @@ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> */ -#define KMSG_COMPONENT "zcrypt" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zcrypt: " fmt #include <linux/module.h> #include <linux/init.h> @@ -31,11 +30,6 @@ #define CEIL4(x) ((((x) + 3) / 4) * 4) 
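
[Annotation] Both msgtype50 entry points above now follow the same synchronous pattern, with the completion embedded in the ap_message instead of a stack-local struct handed around via ap_msg->private. The skeleton, with error and cancel handling elided:

    init_completion(&ap_msg->response.work);
    rc = ap_queue_message(zq->queue, ap_msg);  /* hand off to the AP queue */
    if (!rc)
        rc = wait_for_completion_interruptible(&ap_msg->response.work);
    /* rc == 0: receive callback ran, ap_msg->rc and the reply are valid;
     * rc < 0:  signal pending, the queued message must be cancelled */
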
-struct response_type { - struct completion work; - int type; -}; - #define CEXXC_RESPONSE_TYPE_ICA 0 #define CEXXC_RESPONSE_TYPE_XCRB 1 #define CEXXC_RESPONSE_TYPE_EP11 2 @@ -437,9 +431,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, ap_msg->flags |= AP_MSG_FLAG_ADMIN; break; default: - ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n", - __func__, msg->cprbx.func_id[0], - msg->cprbx.func_id[1]); + pr_debug("unknown CPRB minor version '%c%c'\n", + msg->cprbx.func_id[0], msg->cprbx.func_id[1]); } /* copy data block */ @@ -629,9 +622,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy CPRB to user */ if (xcrb->reply_control_blk_length < msg->fmt2.count1) { - ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_control_blk_length, - msg->fmt2.count1); + pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n", + xcrb->reply_control_blk_length, msg->fmt2.count1); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, @@ -642,9 +634,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy data buffer to user */ if (msg->fmt2.count2) { if (xcrb->reply_data_length < msg->fmt2.count2) { - ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_data_length, - msg->fmt2.count2); + pr_debug("reply_data_length %u < required %u => EMSGSIZE\n", + xcrb->reply_data_length, msg->fmt2.count2); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_data_addr, @@ -673,9 +664,8 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, char *data = reply->msg; if (xcrb->resp_len < msg->fmt2.count1) { - ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", - __func__, (unsigned int)xcrb->resp_len, - msg->fmt2.count1); + pr_debug("resp_len %u < required %u => EMSGSIZE\n", + (unsigned int)xcrb->resp_len, msg->fmt2.count1); return -EMSGSIZE; } @@ -860,7 +850,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86x_reply *t86r; int len; @@ -875,7 +865,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = sizeof(struct type86x_reply) + t86r->length; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -889,7 +879,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -924,7 +914,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86_ep11_reply *t86r; int len; @@ -939,7 +929,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("len mismatch => 
EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -971,9 +961,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -983,15 +971,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1005,7 +993,6 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1021,9 +1008,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -1033,15 +1018,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1055,7 +1040,6 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1065,28 +1049,21 @@ out_free: * Prepare a CCA AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
*/ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned short **dom) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); } @@ -1101,7 +1078,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -1132,11 +1109,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, msg->hdr.fromcardlen1 -= delta; } - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1151,9 +1128,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, out: if (rc) - ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -1162,28 +1139,21 @@ out: * Prepare an EP11 AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
*/ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_EP11, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_EP11; return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code, domain); } @@ -1201,7 +1171,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * { int rc; unsigned int lfmt; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct ep11_cprb cprbx; @@ -1255,11 +1225,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * msg->hdr.fromcardlen1 = zq->reply.bufsize - sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1274,29 +1244,31 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * out: if (rc) - ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } +/* + * Prepare a CEXXC get random request ap message. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_max_msg_size is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
+ */ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); @@ -1323,16 +1295,16 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, short int verb_length; short int key_length; } __packed * msg = ap_msg->msg; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; int rc; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c index 112a80e8e6c2..a173d32eb6e8 100644 --- a/drivers/s390/crypto/zcrypt_queue.c +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -11,6 +11,7 @@ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> */ +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -18,7 +19,6 @@ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/compat.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/uaccess.h> @@ -42,7 +42,7 @@ static ssize_t online_show(struct device *dev, { struct zcrypt_queue *zq = dev_get_drvdata(dev); struct ap_queue *aq = to_ap_queue(dev); - int online = aq->config && zq->online ? 1 : 0; + int online = aq->config && !aq->chkstop && zq->online ? 1 : 0; return sysfs_emit(buf, "%d\n", online); } @@ -59,7 +59,8 @@ static ssize_t online_store(struct device *dev, if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; - if (online && (!aq->config || !aq->card->config)) + if (online && (!aq->config || !aq->card->config || + aq->chkstop || aq->card->chkstop)) return -ENODEV; if (online && !zc->online) return -EINVAL; diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 74760c1a163b..0fd700c5745a 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -2,15 +2,6 @@ menu "S/390 network device drivers" depends on NETDEVICES && S390 -config LCS - def_tristate m - prompt "Lan Channel Station Interface" - depends on CCW && NETDEVICES && ETHERNET - help - Select this option if you want to use LCS networking on IBM System z. - To compile as a module, choose M. The module name is lcs. - If you do not use LCS, choose N. - config CTCM def_tristate m prompt "CTC and MPC SNA device support" @@ -26,18 +17,6 @@ config CTCM To compile into the kernel, choose Y. If you do not need any channel-to-channel connection, choose N. 
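Editorial aside (not part of the patch): the comments added to prep_cca_ap_msg(), prep_ep11_ap_msg() and prep_rng_ap_msg() above describe a new message lifecycle. The response descriptor moves from a separately allocated ap_msg->private (kmemdup'd in each prep helper and freed by the caller) into an ap_response_type embedded in struct ap_message, and buffer allocation moves out of the prep helpers into an ap_init_apmsg()/ap_release_apmsg() pair supplied by the caller. A minimal caller sketch of that lifecycle follows; ap_init_apmsg() and ap_release_apmsg() are named in the comments above but their prototypes do not appear in this diff, so the argument lists below are assumptions for illustration only.

	/*
	 * Hypothetical caller sketch, not a patch hunk. Assumes
	 * ap_init_apmsg() allocates ap_msg->msg with ap_msg->bufsize
	 * bytes and ap_release_apmsg() undoes it; both names come from
	 * the comments in the hunks above, the signatures are guessed.
	 */
	static long example_rng_request(struct zcrypt_queue *zq,
					unsigned int *domain)
	{
		struct ap_message ap_msg;
		int func_code;
		long rc;

		rc = ap_init_apmsg(&ap_msg);	/* assumed signature */
		if (rc)
			return rc;

		/*
		 * Sets ap_msg.response.type = CEXXC_RESPONSE_TYPE_XCRB and,
		 * per the hunk above, fails with -EMSGSIZE if the
		 * preallocated buffer is smaller than
		 * AP_DEFAULT_MAX_MSG_SIZE.
		 */
		rc = prep_rng_ap_msg(&ap_msg, &func_code, domain);
		if (rc)
			goto out;

		/*
		 * ... ap_queue_message(zq->queue, &ap_msg) and wait for
		 * completion, as zcrypt_msgtype6_rng() does above ...
		 */
	out:
		ap_release_apmsg(&ap_msg);	/* required even on failure */
		return rc;
	}

Because the ap_response_type now lives inside struct ap_message, the old out_free cleanup of ap_msg->private disappears, and init_completion(&resp_type->work) operates on storage whose lifetime matches the message itself.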
-config NETIUCV - def_tristate m - prompt "IUCV network device support (VM only)" - depends on IUCV && NETDEVICES - help - Select this option if you want to use inter-user communication - vehicle networking under VM or VIF. It enables a fast communication - link between VM guests. Using ifconfig a point-to-point connection - can be established to the Linux on IBM System z - running on the other VM guest. To compile as a module, choose M. - The module name is netiucv. If unsure, choose Y. - config SMSGIUCV def_tristate m prompt "IUCV special message support (VM only)" @@ -98,15 +77,15 @@ config QETH_OSX config CCWGROUP tristate - default (LCS || CTCM || QETH || SMC) + default (CTCM || QETH || SMC) config ISM tristate "Support for ISM vPCI Adapter" - depends on PCI && SMC + depends on PCI && DIBS default n help Select this option if you want to use the Internal Shared Memory - vPCI Adapter. + vPCI Adapter. The adapter can be used with the SMC network protocol. To compile as a module choose M. The module name is ism. If unsure, choose N. diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index bc55ec316adb..537514cc52fb 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -5,10 +5,8 @@ ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o obj-$(CONFIG_CTCM) += ctcm.o fsm.o -obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o -obj-$(CONFIG_LCS) += lcs.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 90ec477386a8..bf917f426453 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -12,8 +12,7 @@ #undef DEBUGDATA #undef DEBUGCCW -#define KMSG_COMPONENT "ctcm" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ctcm: " fmt #include <linux/module.h> #include <linux/init.h> @@ -882,6 +881,13 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_RXERR); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } + } else if (event == CTC_EVENT_UC_RCRESET) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_info(&dev->dev, + "Init handshake not received, peer not ready yet\n"); } else { CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, @@ -967,6 +973,13 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_TXERR); fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); } + } else if (event == CTC_EVENT_UC_RCRESET) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_info(&dev->dev, + "Init handshake not sent, peer not ready yet\n"); } else { CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, @@ -1325,7 +1338,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) clear_normalized_cda(&ch->ccw[1]); CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", - (void *)(unsigned long)ch->ccw[1].cda, + (void *)(u64)dma32_to_u32(ch->ccw[1].cda), ch->trans_skb->data); ch->ccw[1].count = ch->max_bufsize; @@ -1340,7 +1353,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) } CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", - (void *)(unsigned 
long)ch->ccw[1].cda, + (void *)(u64)dma32_to_u32(ch->ccw[1].cda), ch->trans_skb->data); ch->ccw[1].count = ch->trans_skb->len; diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 6faf27136024..3d7ccf2366a0 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -20,8 +20,7 @@ #undef DEBUGDATA #undef DEBUGCCW -#define KMSG_COMPONENT "ctcm" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ctcm: " fmt #include <linux/module.h> #include <linux/init.h> @@ -200,13 +199,13 @@ static void channel_free(struct channel *ch) static void channel_remove(struct channel *ch) { struct channel **c = &channels; - char chid[CTCM_ID_SIZE+1]; + char chid[CTCM_ID_SIZE]; int ok = 0; if (ch == NULL) return; else - strncpy(chid, ch->id, CTCM_ID_SIZE); + strscpy(chid, ch->id, sizeof(chid)); channel_free(ch); while (*c) { @@ -996,7 +995,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; dev->hard_header_len = LL_HEADER_LENGTH + 2; } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -1389,7 +1388,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, ch->ccw[15].cmd_code = CCW_CMD_WRITE; ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[15].count = TH_HEADER_LENGTH; - ch->ccw[15].cda = virt_to_phys(ch->discontact_th); + ch->ccw[15].cda = virt_to_dma32(ch->discontact_th); ch->ccw[16].cmd_code = CCW_CMD_NOOP; ch->ccw[16].flags = CCW_FLAG_SLI; diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 7a2f34a5e0e0..0f329fb514ee 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -18,9 +18,9 @@ #undef DEBUGDATA #undef DEBUGCCW -#define KMSG_COMPONENT "ctcm" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ctcm: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -179,7 +179,7 @@ void ctcmpc_dumpit(char *buf, int len) ctcm_pr_debug(" %s (+%s) : %s [%s]\n", addr, boff, bhex, basc); dup = 0; - strcpy(duphex, bhex); + strscpy(duphex, bhex); } else dup++; @@ -700,7 +700,6 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) grp->sweep_req_pend_num--; ctcmpc_send_sweep_resp(ch); - kfree(mpcginfo); return; } @@ -1708,57 +1707,57 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) ch->ccw[9].cmd_code = CCW_CMD_WRITE; ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[9].count = TH_HEADER_LENGTH; - ch->ccw[9].cda = virt_to_phys(ch->xid_th); + ch->ccw[9].cda = virt_to_dma32(ch->xid_th); if (ch->xid == NULL) goto done; ch->ccw[10].cmd_code = CCW_CMD_WRITE; ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[10].count = XID2_LENGTH; - ch->ccw[10].cda = virt_to_phys(ch->xid); + ch->ccw[10].cda = virt_to_dma32(ch->xid); ch->ccw[11].cmd_code = CCW_CMD_READ; ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[11].count = TH_HEADER_LENGTH; - ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th); + ch->ccw[11].cda = virt_to_dma32(ch->rcvd_xid_th); ch->ccw[12].cmd_code = CCW_CMD_READ; ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[12].count = XID2_LENGTH; - ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid); + ch->ccw[12].cda = virt_to_dma32(ch->rcvd_xid); ch->ccw[13].cmd_code = CCW_CMD_READ; - ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id); + ch->ccw[13].cda = virt_to_dma32(ch->rcvd_xid_id); } else { /* side == YSIDE : mpc_action_yside_xid */ ch->ccw[9].cmd_code = CCW_CMD_READ; ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 
ch->ccw[9].count = TH_HEADER_LENGTH; - ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th); + ch->ccw[9].cda = virt_to_dma32(ch->rcvd_xid_th); ch->ccw[10].cmd_code = CCW_CMD_READ; ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[10].count = XID2_LENGTH; - ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); + ch->ccw[10].cda = virt_to_dma32(ch->rcvd_xid); if (ch->xid_th == NULL) goto done; ch->ccw[11].cmd_code = CCW_CMD_WRITE; ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[11].count = TH_HEADER_LENGTH; - ch->ccw[11].cda = virt_to_phys(ch->xid_th); + ch->ccw[11].cda = virt_to_dma32(ch->xid_th); if (ch->xid == NULL) goto done; ch->ccw[12].cmd_code = CCW_CMD_WRITE; ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ch->ccw[12].count = XID2_LENGTH; - ch->ccw[12].cda = virt_to_phys(ch->xid); + ch->ccw[12].cda = virt_to_dma32(ch->xid); if (ch->xid_id == NULL) goto done; ch->ccw[13].cmd_code = CCW_CMD_WRITE; - ch->ccw[13].cda = virt_to_phys(ch->xid_id); + ch->ccw[13].cda = virt_to_dma32(ch->xid_id); } ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC; diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index 0c5d8a3eaa2e..529a1c40ae63 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c @@ -9,8 +9,7 @@ #undef DEBUGDATA #undef DEBUGCCW -#define KMSG_COMPONENT "ctcm" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ctcm: " fmt #include <linux/device.h> #include <linux/sysfs.h> diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index 0ff61d00feb1..58f8e2fb6d54 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c @@ -5,11 +5,12 @@ */ #include "fsm.h" +#include <linux/export.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/timer.h> -MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert <felfert@millenux.com>"); MODULE_DESCRIPTION("Finite state machine helper functions"); MODULE_LICENSE("GPL"); @@ -132,7 +133,7 @@ fsm_getstate_str(fsm_instance *fi) static void fsm_expire_timer(struct timer_list *t) { - fsm_timer *this = from_timer(this, t, tl); + fsm_timer *this = timer_container_of(this, t, tl); #if FSM_TIMER_DEBUG printk(KERN_DEBUG "fsm(%s): Timer %p expired\n", this->fi->name, this); @@ -158,7 +159,7 @@ fsm_deltimer(fsm_timer *this) printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name, this); #endif - del_timer(&this->tl); + timer_delete(&this->tl); } int @@ -188,7 +189,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg) this->fi->name, this, millisec); #endif - del_timer(&this->tl); + timer_delete(&this->tl); timer_setup(&this->tl, fsm_expire_timer, 0); this->expire_event = event; this->event_arg = arg; diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h index 70c5bbda0fea..08d17956cb36 100644 --- a/drivers/s390/net/ism.h +++ b/drivers/s390/net/ism.h @@ -5,18 +5,19 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/pci.h> -#include <linux/ism.h> -#include <net/smc.h> +#include <linux/dibs.h> #include <asm/pci_insn.h> #define UTIL_STR_LEN 16 +#define ISM_ERROR 0xFFFF + +#define ISM_NR_DMBS 1920 /* * Do not use the first word of the DMB bits to ensure 8 byte aligned access. 
*/ #define ISM_DMB_WORD_OFFSET 1 #define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32) -#define ISM_IDENT_MASK 0x00FFFF #define ISM_REG_SBA 0x1 #define ISM_REG_IEQ 0x2 @@ -33,6 +34,23 @@ #define ISM_UNREG_SBA 0x11 #define ISM_UNREG_IEQ 0x12 +enum ism_event_type { + ISM_EVENT_BUF = 0x00, + ISM_EVENT_DEV = 0x01, + ISM_EVENT_SWR = 0x02 +}; + +enum ism_event_code { + ISM_BUF_DMB_UNREGISTERED = 0x04, + ISM_BUF_USING_ISM_DEV_DISABLED = 0x08, + ISM_BUF_OWNING_ISM_DEV_IN_ERR_STATE = 0x02, + ISM_BUF_USING_ISM_DEV_IN_ERR_STATE = 0x03, + ISM_BUF_VLAN_MISMATCH_WITH_OWNER = 0x05, + ISM_BUF_VLAN_MISMATCH_WITH_USER = 0x06, + ISM_DEV_GID_DISABLED = 0x07, + ISM_DEV_GID_ERR_STATE = 0x01 +}; + struct ism_req_hdr { u32 cmd; u16 : 16; @@ -66,6 +84,15 @@ union ism_reg_ieq { } response; } __aligned(16); +/* ISM-vPCI devices provide 64 Bit GIDs + * Map them to ISM UUID GIDs like this: + * _________________________________________ + * | 64 Bit ISM-vPCI GID | 00000000_00000000 | + * ----------------------------------------- + * This will be interpreted as a UIID variant, that is reserved + * for NCS backward compatibility. So it will not collide with + * proper UUIDs. + */ union ism_read_gid { struct { struct ism_req_hdr hdr; @@ -175,6 +202,14 @@ struct ism_eq_header { u64 : 64; }; +struct ism_event { + u32 type; + u32 code; + u64 tok; + u64 time; + u64 info; +}; + struct ism_eq { struct ism_eq_header header; struct ism_event entry[15]; @@ -189,15 +224,22 @@ struct ism_sba { u16 dmbe_mask[ISM_NR_DMBS]; }; -#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ - ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) +struct ism_dev { + spinlock_t cmd_lock; /* serializes cmds */ + struct dibs_dev *dibs; + struct pci_dev *pdev; + struct ism_sba *sba; + dma_addr_t sba_dma_addr; + DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); -struct ism_systemeid { - u8 seid_string[24]; - u8 serial_number[4]; - u8 type[4]; + struct ism_eq *ieq; + dma_addr_t ieq_dma_addr; + int ieq_idx; }; +#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ + ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) + static inline void __ism_read_cmd(struct ism_dev *ism, void *data, unsigned long offset, unsigned long len) { diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 6df7f377d2f9..8b8e4f06be0f 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -4,9 +4,9 @@ * * Copyright IBM Corp. 
2018 */ -#define KMSG_COMPONENT "ism" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "ism: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/types.h> #include <linux/interrupt.h> @@ -20,7 +20,6 @@ MODULE_DESCRIPTION("ISM driver for s390"); MODULE_LICENSE("GPL"); -#define PCI_DEVICE_ID_IBM_ISM 0x04ED #define DRV_NAME "ism" static const struct pci_device_id ism_device_table[] = { @@ -30,107 +29,13 @@ static const struct pci_device_id ism_device_table[] = { MODULE_DEVICE_TABLE(pci, ism_device_table); static debug_info_t *ism_debug_info; -static const struct smcd_ops ism_ops; - -#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */ -static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */ - /* a list for fast mapping */ -static u8 max_client; -static DEFINE_MUTEX(clients_lock); -struct ism_dev_list { - struct list_head list; - struct mutex mutex; /* protects ism device list */ -}; - -static struct ism_dev_list ism_dev_list = { - .list = LIST_HEAD_INIT(ism_dev_list.list), - .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex), -}; - -static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism) -{ - unsigned long flags; - - spin_lock_irqsave(&ism->lock, flags); - ism->subs[client->id] = client; - spin_unlock_irqrestore(&ism->lock, flags); -} - -int ism_register_client(struct ism_client *client) -{ - struct ism_dev *ism; - int i, rc = -ENOSPC; - - mutex_lock(&ism_dev_list.mutex); - mutex_lock(&clients_lock); - for (i = 0; i < MAX_CLIENTS; ++i) { - if (!clients[i]) { - clients[i] = client; - client->id = i; - if (i == max_client) - max_client++; - rc = 0; - break; - } - } - mutex_unlock(&clients_lock); - - if (i < MAX_CLIENTS) { - /* initialize with all devices that we got so far */ - list_for_each_entry(ism, &ism_dev_list.list, list) { - ism->priv[i] = NULL; - client->add(ism); - ism_setup_forwarding(client, ism); - } - } - mutex_unlock(&ism_dev_list.mutex); - - return rc; -} -EXPORT_SYMBOL_GPL(ism_register_client); - -int ism_unregister_client(struct ism_client *client) -{ - struct ism_dev *ism; - unsigned long flags; - int rc = 0; - - mutex_lock(&ism_dev_list.mutex); - list_for_each_entry(ism, &ism_dev_list.list, list) { - spin_lock_irqsave(&ism->lock, flags); - /* Stop forwarding IRQs and events */ - ism->subs[client->id] = NULL; - for (int i = 0; i < ISM_NR_DMBS; ++i) { - if (ism->sba_client_arr[i] == client->id) { - WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n", - __func__, client->name); - rc = -EBUSY; - goto err_reg_dmb; - } - } - spin_unlock_irqrestore(&ism->lock, flags); - } - mutex_unlock(&ism_dev_list.mutex); - - mutex_lock(&clients_lock); - clients[client->id] = NULL; - if (client->id + 1 == max_client) - max_client--; - mutex_unlock(&clients_lock); - return rc; - -err_reg_dmb: - spin_unlock_irqrestore(&ism->lock, flags); - mutex_unlock(&ism_dev_list.mutex); - return rc; -} -EXPORT_SYMBOL_GPL(ism_unregister_client); static int ism_cmd(struct ism_dev *ism, void *cmd) { struct ism_req_hdr *req = cmd; struct ism_resp_hdr *resp = cmd; + spin_lock(&ism->cmd_lock); __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); __ism_write_cmd(ism, req, 0, sizeof(*req)); @@ -144,6 +49,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) } __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); out: + spin_unlock(&ism->cmd_lock); return resp->ret; } @@ -271,8 +177,9 @@ static int unregister_ieq(struct ism_dev *ism) return 0; } -static int ism_read_local_gid(struct 
ism_dev *ism) +static int ism_read_local_gid(struct dibs_dev *dibs) { + struct ism_dev *ism = dibs->drv_priv; union ism_read_gid cmd; int ret; @@ -284,66 +191,93 @@ static int ism_read_local_gid(struct ism_dev *ism) if (ret) goto out; - ism->local_gid = cmd.response.gid; + memset(&dibs->gid, 0, sizeof(dibs->gid)); + memcpy(&dibs->gid, &cmd.response.gid, sizeof(cmd.response.gid)); out: return ret; } -static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid, - u32 vid) +static int ism_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid, + u32 vid_valid, u32 vid) { + struct ism_dev *ism = dibs->drv_priv; union ism_query_rgid cmd; memset(&cmd, 0, sizeof(cmd)); cmd.request.hdr.cmd = ISM_QUERY_RGID; cmd.request.hdr.len = sizeof(cmd.request); - cmd.request.rgid = rgid; + memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid)); cmd.request.vlan_valid = vid_valid; cmd.request.vlan_id = vid; return ism_cmd(ism, &cmd); } -static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb) +static int ism_max_dmbs(void) { - clear_bit(dmb->sba_idx, ism->sba_bitmap); - dma_free_coherent(&ism->pdev->dev, dmb->dmb_len, - dmb->cpu_addr, dmb->dma_addr); + return ISM_NR_DMBS; } -static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb) +static void ism_free_dmb(struct ism_dev *ism, struct dibs_dmb *dmb) { + clear_bit(dmb->idx, ism->sba_bitmap); + dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len, + DMA_FROM_DEVICE); + folio_put(virt_to_folio(dmb->cpu_addr)); +} + +static int ism_alloc_dmb(struct ism_dev *ism, struct dibs_dmb *dmb) +{ + struct folio *folio; unsigned long bit; + int rc; if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev)) return -EINVAL; - if (!dmb->sba_idx) { + if (!dmb->idx) { bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, ISM_DMB_BIT_OFFSET); if (bit == ISM_NR_DMBS) return -ENOSPC; - dmb->sba_idx = bit; + dmb->idx = bit; } - if (dmb->sba_idx < ISM_DMB_BIT_OFFSET || - test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) + if (dmb->idx < ISM_DMB_BIT_OFFSET || + test_and_set_bit(dmb->idx, ism->sba_bitmap)) return -EINVAL; - dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, - &dmb->dma_addr, - GFP_KERNEL | __GFP_NOWARN | - __GFP_NOMEMALLOC | __GFP_NORETRY); - if (!dmb->cpu_addr) - clear_bit(dmb->sba_idx, ism->sba_bitmap); + folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | + __GFP_NORETRY, get_order(dmb->dmb_len)); + + if (!folio) { + rc = -ENOMEM; + goto out_bit; + } + + dmb->cpu_addr = folio_address(folio); + dmb->dma_addr = dma_map_page(&ism->pdev->dev, + virt_to_page(dmb->cpu_addr), 0, + dmb->dmb_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) { + rc = -ENOMEM; + goto out_free; + } + + return 0; - return dmb->cpu_addr ? 
0 : -ENOMEM; +out_free: + kfree(dmb->cpu_addr); +out_bit: + clear_bit(dmb->idx, ism->sba_bitmap); + return rc; } -int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb, - struct ism_client *client) +static int ism_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb, + struct dibs_client *client) { + struct ism_dev *ism = dibs->drv_priv; union ism_reg_dmb cmd; unsigned long flags; int ret; @@ -358,10 +292,10 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb, cmd.request.dmb = dmb->dma_addr; cmd.request.dmb_len = dmb->dmb_len; - cmd.request.sba_idx = dmb->sba_idx; + cmd.request.sba_idx = dmb->idx; cmd.request.vlan_valid = dmb->vlan_valid; cmd.request.vlan_id = dmb->vlan_id; - cmd.request.rgid = dmb->rgid; + memcpy(&cmd.request.rgid, &dmb->rgid, sizeof(u64)); ret = ism_cmd(ism, &cmd); if (ret) { @@ -369,16 +303,16 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb, goto out; } dmb->dmb_tok = cmd.response.dmb_tok; - spin_lock_irqsave(&ism->lock, flags); - ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id; - spin_unlock_irqrestore(&ism->lock, flags); + spin_lock_irqsave(&dibs->lock, flags); + dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = client->id; + spin_unlock_irqrestore(&dibs->lock, flags); out: return ret; } -EXPORT_SYMBOL_GPL(ism_register_dmb); -int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb) +static int ism_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb) { + struct ism_dev *ism = dibs->drv_priv; union ism_unreg_dmb cmd; unsigned long flags; int ret; @@ -389,9 +323,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb) cmd.request.dmb_tok = dmb->dmb_tok; - spin_lock_irqsave(&ism->lock, flags); - ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT; - spin_unlock_irqrestore(&ism->lock, flags); + spin_lock_irqsave(&dibs->lock, flags); + dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = NO_DIBS_CLIENT; + spin_unlock_irqrestore(&dibs->lock, flags); ret = ism_cmd(ism, &cmd); if (ret && ret != ISM_ERROR) @@ -401,10 +335,10 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb) out: return ret; } -EXPORT_SYMBOL_GPL(ism_unregister_dmb); -static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id) +static int ism_add_vlan_id(struct dibs_dev *dibs, u64 vlan_id) { + struct ism_dev *ism = dibs->drv_priv; union ism_set_vlan_id cmd; memset(&cmd, 0, sizeof(cmd)); @@ -416,8 +350,9 @@ static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id) return ism_cmd(ism, &cmd); } -static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id) +static int ism_del_vlan_id(struct dibs_dev *dibs, u64 vlan_id) { + struct ism_dev *ism = dibs->drv_priv; union ism_set_vlan_id cmd; memset(&cmd, 0, sizeof(cmd)); @@ -429,16 +364,17 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id) return ism_cmd(ism, &cmd); } -static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq, - u32 event_code, u64 info) +static int ism_signal_ieq(struct dibs_dev *dibs, const uuid_t *rgid, + u32 trigger_irq, u32 event_code, u64 info) { + struct ism_dev *ism = dibs->drv_priv; union ism_sig_ieq cmd; memset(&cmd, 0, sizeof(cmd)); cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; cmd.request.hdr.len = sizeof(cmd.request); - cmd.request.rgid = rgid; + memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid)); cmd.request.trigger_irq = trigger_irq; cmd.request.event_code = event_code; cmd.request.info = info; @@ -452,9 +388,11 @@ static unsigned int max_bytes(unsigned int start, unsigned int 
len, return min(boundary - (start & (boundary - 1)), len); } -int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf, - unsigned int offset, void *data, unsigned int size) +static int ism_move(struct dibs_dev *dibs, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, + unsigned int size) { + struct ism_dev *ism = dibs->drv_priv; unsigned int bytes; u64 dmb_req; int ret; @@ -475,58 +413,79 @@ int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf, return 0; } -EXPORT_SYMBOL_GPL(ism_move); - -static struct ism_systemeid SYSTEM_EID = { - .seid_string = "IBM-SYSZ-ISMSEID00000000", - .serial_number = "0000", - .type = "0000", -}; -static void ism_create_system_eid(void) +static u16 ism_get_chid(struct dibs_dev *dibs) { - struct cpuid id; - u16 ident_tail; - char tmp[5]; - - get_cpu_id(&id); - ident_tail = (u16)(id.ident & ISM_IDENT_MASK); - snprintf(tmp, 5, "%04X", ident_tail); - memcpy(&SYSTEM_EID.serial_number, tmp, 4); - snprintf(tmp, 5, "%04X", id.machine); - memcpy(&SYSTEM_EID.type, tmp, 4); + struct ism_dev *ism = dibs->drv_priv; + + if (!ism || !ism->pdev) + return 0; + + return to_zpci(ism->pdev)->pchid; } -u8 *ism_get_seid(void) +static int ism_match_event_type(u32 s390_event_type) { - return SYSTEM_EID.seid_string; + switch (s390_event_type) { + case ISM_EVENT_BUF: + return DIBS_BUF_EVENT; + case ISM_EVENT_DEV: + return DIBS_DEV_EVENT; + case ISM_EVENT_SWR: + return DIBS_SW_EVENT; + default: + return DIBS_OTHER_TYPE; + } } -EXPORT_SYMBOL_GPL(ism_get_seid); -static u16 ism_get_chid(struct ism_dev *ism) +static int ism_match_event_subtype(u32 s390_event_subtype) { - if (!ism || !ism->pdev) - return 0; - - return to_zpci(ism->pdev)->pchid; + switch (s390_event_subtype) { + case ISM_BUF_DMB_UNREGISTERED: + return DIBS_BUF_UNREGISTERED; + case ISM_DEV_GID_DISABLED: + return DIBS_DEV_DISABLED; + case ISM_DEV_GID_ERR_STATE: + return DIBS_DEV_ERR_STATE; + default: + return DIBS_OTHER_SUBTYPE; + } } static void ism_handle_event(struct ism_dev *ism) { + struct dibs_dev *dibs = ism->dibs; + struct dibs_event event; struct ism_event *entry; - struct ism_client *clt; + struct dibs_client *clt; int i; while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { - if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry)) + if (++ism->ieq_idx == ARRAY_SIZE(ism->ieq->entry)) ism->ieq_idx = 0; entry = &ism->ieq->entry[ism->ieq_idx]; debug_event(ism_debug_info, 2, entry, sizeof(*entry)); - for (i = 0; i < max_client; ++i) { - clt = ism->subs[i]; + __memset(&event, 0, sizeof(event)); + event.type = ism_match_event_type(entry->type); + if (event.type == DIBS_SW_EVENT) + event.subtype = entry->code; + else + event.subtype = ism_match_event_subtype(entry->code); + event.time = entry->time; + event.data = entry->info; + switch (event.type) { + case DIBS_BUF_EVENT: + event.buffer_tok = entry->tok; + break; + case DIBS_DEV_EVENT: + case DIBS_SW_EVENT: + memcpy(&event.gid, &entry->tok, sizeof(u64)); + } + for (i = 0; i < MAX_DIBS_CLIENTS; ++i) { + clt = dibs->subs[i]; if (clt) - clt->handle_event(ism, entry); + clt->ops->handle_event(dibs, &event); } } } @@ -535,14 +494,17 @@ static irqreturn_t ism_handle_irq(int irq, void *data) { struct ism_dev *ism = data; unsigned long bit, end; + struct dibs_dev *dibs; unsigned long *bv; u16 dmbemask; u8 client_id; + dibs = ism->dibs; + bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; - spin_lock(&ism->lock); + spin_lock(&dibs->lock); 
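	/*
	 * Editorial note, not a patch line: from here on the interrupt
	 * handler serializes with clients via dibs->lock instead of the
	 * removed ism->lock, and maps each DMB bit to its client through
	 * dibs->dmb_clientid_arr and dibs->subs rather than the old
	 * ism->sba_client_arr and ism->subs arrays.
	 */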
ism->sba->s = 0; barrier(); for (bit = 0;;) { @@ -554,10 +516,13 @@ static irqreturn_t ism_handle_irq(int irq, void *data) dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET]; ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; barrier(); - client_id = ism->sba_client_arr[bit]; - if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id])) + client_id = dibs->dmb_clientid_arr[bit]; + if (unlikely(client_id == NO_DIBS_CLIENT || + !dibs->subs[client_id])) continue; - ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask); + dibs->subs[client_id]->ops->handle_irq(dibs, + bit + ISM_DMB_BIT_OFFSET, + dmbemask); } if (ism->sba->e) { @@ -565,33 +530,35 @@ static irqreturn_t ism_handle_irq(int irq, void *data) barrier(); ism_handle_event(ism); } - spin_unlock(&ism->lock); + spin_unlock(&dibs->lock); return IRQ_HANDLED; } -static u64 ism_get_local_gid(struct ism_dev *ism) -{ - return ism->local_gid; -} +static const struct dibs_dev_ops ism_ops = { + .get_fabric_id = ism_get_chid, + .query_remote_gid = ism_query_rgid, + .max_dmbs = ism_max_dmbs, + .register_dmb = ism_register_dmb, + .unregister_dmb = ism_unregister_dmb, + .move_data = ism_move, + .add_vlan_id = ism_add_vlan_id, + .del_vlan_id = ism_del_vlan_id, + .signal_event = ism_signal_ieq, +}; static int ism_dev_init(struct ism_dev *ism) { struct pci_dev *pdev = ism->pdev; - int i, ret; + int ret; ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); if (ret <= 0) goto out; - ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL); - if (!ism->sba_client_arr) - goto free_vectors; - memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS); - ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0, pci_name(pdev), ism); if (ret) - goto free_client_arr; + goto free_vectors; ret = register_sba(ism); if (ret) @@ -601,46 +568,33 @@ static int ism_dev_init(struct ism_dev *ism) if (ret) goto unreg_sba; - ret = ism_read_local_gid(ism); - if (ret) - goto unreg_ieq; - - if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID)) - /* hardware is V2 capable */ - ism_create_system_eid(); - - mutex_lock(&ism_dev_list.mutex); - mutex_lock(&clients_lock); - for (i = 0; i < max_client; ++i) { - if (clients[i]) { - clients[i]->add(ism); - ism_setup_forwarding(clients[i], ism); - } - } - mutex_unlock(&clients_lock); - - list_add(&ism->list, &ism_dev_list.list); - mutex_unlock(&ism_dev_list.mutex); - query_info(ism); return 0; -unreg_ieq: - unregister_ieq(ism); unreg_sba: unregister_sba(ism); free_irq: free_irq(pci_irq_vector(pdev, 0), ism); -free_client_arr: - kfree(ism->sba_client_arr); free_vectors: pci_free_irq_vectors(pdev); out: return ret; } +static void ism_dev_exit(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + + unregister_ieq(ism); + unregister_sba(ism); + free_irq(pci_irq_vector(pdev, 0), ism); + pci_free_irq_vectors(pdev); +} + static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct dibs_dev *dibs; + struct zpci_dev *zdev; struct ism_dev *ism; int ret; @@ -648,19 +602,13 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!ism) return -ENOMEM; - spin_lock_init(&ism->lock); + spin_lock_init(&ism->cmd_lock); dev_set_drvdata(&pdev->dev, ism); ism->pdev = pdev; - ism->dev.parent = &pdev->dev; - device_initialize(&ism->dev); - dev_set_name(&ism->dev, dev_name(&pdev->dev)); - ret = device_add(&ism->dev); - if (ret) - goto err_dev; ret = pci_enable_device_mem(pdev); if (ret) - goto err; + goto err_dev; ret = pci_request_mem_regions(pdev, DRV_NAME); if (ret) @@ -674,18 
+622,48 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) dma_set_max_seg_size(&pdev->dev, SZ_1M); pci_set_master(pdev); + dibs = dibs_dev_alloc(); + if (!dibs) { + ret = -ENOMEM; + goto err_resource; + } + /* set this up before we enable interrupts */ + ism->dibs = dibs; + dibs->drv_priv = ism; + dibs->ops = &ism_ops; + + /* enable ism device, but any interrupts and events will be ignored + * before dibs_dev_add() adds it to any clients. + */ ret = ism_dev_init(ism); if (ret) - goto err_resource; + goto err_dibs; + + /* after ism_dev_init() we can call ism function to set gid */ + ret = ism_read_local_gid(dibs); + if (ret) + goto err_ism; + + dibs->dev.parent = &pdev->dev; + + zdev = to_zpci(pdev); + dev_set_name(&dibs->dev, "ism%x", zdev->uid ? zdev->uid : zdev->fid); + + ret = dibs_dev_add(dibs); + if (ret) + goto err_ism; return 0; +err_ism: + ism_dev_exit(ism); +err_dibs: + /* pairs with dibs_dev_alloc() */ + put_device(&dibs->dev); err_resource: pci_release_mem_regions(pdev); err_disable: pci_disable_device(pdev); -err: - device_del(&ism->dev); err_dev: dev_set_drvdata(&pdev->dev, NULL); kfree(ism); @@ -693,46 +671,18 @@ err_dev: return ret; } -static void ism_dev_exit(struct ism_dev *ism) -{ - struct pci_dev *pdev = ism->pdev; - unsigned long flags; - int i; - - spin_lock_irqsave(&ism->lock, flags); - for (i = 0; i < max_client; ++i) - ism->subs[i] = NULL; - spin_unlock_irqrestore(&ism->lock, flags); - - mutex_lock(&ism_dev_list.mutex); - mutex_lock(&clients_lock); - for (i = 0; i < max_client; ++i) { - if (clients[i]) - clients[i]->remove(ism); - } - mutex_unlock(&clients_lock); - - if (SYSTEM_EID.serial_number[0] != '0' || - SYSTEM_EID.type[0] != '0') - ism_del_vlan_id(ism, ISM_RESERVED_VLANID); - unregister_ieq(ism); - unregister_sba(ism); - free_irq(pci_irq_vector(pdev, 0), ism); - kfree(ism->sba_client_arr); - pci_free_irq_vectors(pdev); - list_del_init(&ism->list); - mutex_unlock(&ism_dev_list.mutex); -} - static void ism_remove(struct pci_dev *pdev) { struct ism_dev *ism = dev_get_drvdata(&pdev->dev); + struct dibs_dev *dibs = ism->dibs; + dibs_dev_del(dibs); ism_dev_exit(ism); + /* pairs with dibs_dev_alloc() */ + put_device(&dibs->dev); pci_release_mem_regions(pdev); pci_disable_device(pdev); - device_del(&ism->dev); dev_set_drvdata(&pdev->dev, NULL); kfree(ism); } @@ -752,8 +702,6 @@ static int __init ism_init(void) if (!ism_debug_info) return -ENODEV; - memset(clients, 0, sizeof(clients)); - max_client = 0; debug_register_view(ism_debug_info, &debug_hex_ascii_view); ret = pci_register_driver(&ism_driver); if (ret) @@ -770,103 +718,3 @@ static void __exit ism_exit(void) module_init(ism_init); module_exit(ism_exit); - -/*************************** SMC-D Implementation *****************************/ - -#if IS_ENABLED(CONFIG_SMC) -static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, - u32 vid) -{ - return ism_query_rgid(smcd->priv, rgid, vid_valid, vid); -} - -static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb, - struct ism_client *client) -{ - return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client); -} - -static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) -{ - return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb); -} - -static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id) -{ - return ism_add_vlan_id(smcd->priv, vlan_id); -} - -static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id) -{ - return ism_del_vlan_id(smcd->priv, vlan_id); -} - -static 
int smcd_set_vlan_required(struct smcd_dev *smcd) -{ - return ism_cmd_simple(smcd->priv, ISM_SET_VLAN); -} - -static int smcd_reset_vlan_required(struct smcd_dev *smcd) -{ - return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); -} - -static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq, - u32 event_code, u64 info) -{ - return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info); -} - -static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx, - bool sf, unsigned int offset, void *data, - unsigned int size) -{ - return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size); -} - -static int smcd_supports_v2(void) -{ - return SYSTEM_EID.serial_number[0] != '0' || - SYSTEM_EID.type[0] != '0'; -} - -static u64 smcd_get_local_gid(struct smcd_dev *smcd) -{ - return ism_get_local_gid(smcd->priv); -} - -static u16 smcd_get_chid(struct smcd_dev *smcd) -{ - return ism_get_chid(smcd->priv); -} - -static inline struct device *smcd_get_dev(struct smcd_dev *dev) -{ - struct ism_dev *ism = dev->priv; - - return &ism->dev; -} - -static const struct smcd_ops ism_ops = { - .query_remote_gid = smcd_query_rgid, - .register_dmb = smcd_register_dmb, - .unregister_dmb = smcd_unregister_dmb, - .add_vlan_id = smcd_add_vlan_id, - .del_vlan_id = smcd_del_vlan_id, - .set_vlan_required = smcd_set_vlan_required, - .reset_vlan_required = smcd_reset_vlan_required, - .signal_event = smcd_signal_ieq, - .move_data = smcd_move, - .supports_v2 = smcd_supports_v2, - .get_system_eid = ism_get_seid, - .get_local_gid = smcd_get_local_gid, - .get_chid = smcd_get_chid, - .get_dev = smcd_get_dev, -}; - -const struct smcd_ops *ism_get_smcd_ops(void) -{ - return &ism_ops; -} -EXPORT_SYMBOL_GPL(ism_get_smcd_ops); -#endif diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c deleted file mode 100644 index a1f2acd6fb8f..000000000000 --- a/drivers/s390/net/lcs.c +++ /dev/null @@ -1,2386 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Linux for S/390 Lan Channel Station Network Driver - * - * Copyright IBM Corp. 1999, 2009 - * Author(s): Original Code written by - * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> - * Rewritten by - * Frank Pavlic <fpavlic@de.ibm.com> and - * Martin Schwidefsky <schwidefsky@de.ibm.com> - */ - -#define KMSG_COMPONENT "lcs" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/module.h> -#include <linux/if.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/inetdevice.h> -#include <linux/in.h> -#include <linux/igmp.h> -#include <linux/delay.h> -#include <linux/kthread.h> -#include <linux/slab.h> -#include <net/arp.h> -#include <net/ip.h> - -#include <asm/debug.h> -#include <asm/idals.h> -#include <asm/timex.h> -#include <linux/device.h> -#include <asm/ccwgroup.h> - -#include "lcs.h" - - -/* - * initialization string for output - */ - -static char version[] __initdata = "LCS driver"; - -/* - * the root device for lcs group devices - */ -static struct device *lcs_root_dev; - -/* - * Some prototypes. 
- */ -static void lcs_tasklet(unsigned long); -static void lcs_start_kernel_thread(struct work_struct *); -static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); -#ifdef CONFIG_IP_MULTICAST -static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); -#endif /* CONFIG_IP_MULTICAST */ -static int lcs_recovery(void *ptr); - -/* - * Debug Facility Stuff - */ -static char debug_buffer[255]; -static debug_info_t *lcs_dbf_setup; -static debug_info_t *lcs_dbf_trace; - -/* - * LCS Debug Facility functions - */ -static void -lcs_unregister_debug_facility(void) -{ - debug_unregister(lcs_dbf_setup); - debug_unregister(lcs_dbf_trace); -} - -static int -lcs_register_debug_facility(void) -{ - lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); - lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); - if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { - pr_err("Not enough memory for debug facility.\n"); - lcs_unregister_debug_facility(); - return -ENOMEM; - } - debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_setup, 2); - debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_trace, 2); - return 0; -} - -/* - * Allocate io buffers. - */ -static int -lcs_alloc_channel(struct lcs_channel *channel) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ichalloc"); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - /* alloc memory fo iobuffer */ - channel->iob[cnt].data = - kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); - if (channel->iob[cnt].data == NULL) - break; - channel->iob[cnt].state = LCS_BUF_STATE_EMPTY; - } - if (cnt < LCS_NUM_BUFFS) { - /* Not all io buffers could be allocated. */ - LCS_DBF_TEXT(2, setup, "echalloc"); - while (cnt-- > 0) - kfree(channel->iob[cnt].data); - return -ENOMEM; - } - return 0; -} - -/* - * Free io buffers. - */ -static void -lcs_free_channel(struct lcs_channel *channel) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ichfree"); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - kfree(channel->iob[cnt].data); - channel->iob[cnt].data = NULL; - } -} - -/* - * Cleanup channel. - */ -static void -lcs_cleanup_channel(struct lcs_channel *channel) -{ - LCS_DBF_TEXT(3, setup, "cleanch"); - /* Kill write channel tasklets. */ - tasklet_kill(&channel->irq_tasklet); - /* Free channel buffers. */ - lcs_free_channel(channel); -} - -/* - * LCS free memory for card and channels. - */ -static void -lcs_free_card(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, setup, "remcard"); - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - kfree(card); -} - -/* - * LCS alloc memory for card and channels - */ -static struct lcs_card * -lcs_alloc_card(void) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, setup, "alloclcs"); - - card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); - if (card == NULL) - return NULL; - card->lan_type = LCS_FRAME_TYPE_AUTO; - card->pkt_seq = 0; - card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; - /* Allocate io buffers for the read channel. */ - rc = lcs_alloc_channel(&card->read); - if (rc){ - LCS_DBF_TEXT(2, setup, "iccwerr"); - lcs_free_card(card); - return NULL; - } - /* Allocate io buffers for the write channel. */ - rc = lcs_alloc_channel(&card->write); - if (rc) { - LCS_DBF_TEXT(2, setup, "iccwerr"); - lcs_cleanup_channel(&card->read); - lcs_free_card(card); - return NULL; - } - -#ifdef CONFIG_IP_MULTICAST - INIT_LIST_HEAD(&card->ipm_list); -#endif - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - return card; -} - -/* - * Setup read channel. 
- */ -static void -lcs_setup_read_ccws(struct lcs_card *card) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ireadccw"); - /* Setup read ccws. */ - memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1)); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - card->read.ccws[cnt].cmd_code = LCS_CCW_READ; - card->read.ccws[cnt].count = LCS_IOBUFFERSIZE; - card->read.ccws[cnt].flags = - CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI; - /* - * Note: we have allocated the buffer with GFP_DMA, so - * we do not need to do set_normalized_cda. - */ - card->read.ccws[cnt].cda = - (__u32)virt_to_phys(card->read.iob[cnt].data); - ((struct lcs_header *) - card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; - card->read.iob[cnt].callback = lcs_get_frames_cb; - card->read.iob[cnt].state = LCS_BUF_STATE_READY; - card->read.iob[cnt].count = LCS_IOBUFFERSIZE; - } - card->read.ccws[0].flags &= ~CCW_FLAG_PCI; - card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI; - card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND; - /* Last ccw is a tic (transfer in channel). */ - card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; - card->read.ccws[LCS_NUM_BUFFS].cda = - (__u32)virt_to_phys(card->read.ccws); - /* Setg initial state of the read channel. */ - card->read.state = LCS_CH_STATE_INIT; - - card->read.io_idx = 0; - card->read.buf_idx = 0; -} - -static void -lcs_setup_read(struct lcs_card *card) -{ - LCS_DBF_TEXT(3, setup, "initread"); - - lcs_setup_read_ccws(card); - /* Initialize read channel tasklet. */ - card->read.irq_tasklet.data = (unsigned long) &card->read; - card->read.irq_tasklet.func = lcs_tasklet; - /* Initialize waitqueue. */ - init_waitqueue_head(&card->read.wait_q); -} - -/* - * Setup write channel. - */ -static void -lcs_setup_write_ccws(struct lcs_card *card) -{ - int cnt; - - LCS_DBF_TEXT(3, setup, "iwritccw"); - /* Setup write ccws. */ - memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; - card->write.ccws[cnt].count = 0; - card->write.ccws[cnt].flags = - CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI; - /* - * Note: we have allocated the buffer with GFP_DMA, so - * we do not need to do set_normalized_cda. - */ - card->write.ccws[cnt].cda = - (__u32)virt_to_phys(card->write.iob[cnt].data); - } - /* Last ccw is a tic (transfer in channel). */ - card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; - card->write.ccws[LCS_NUM_BUFFS].cda = - (__u32)virt_to_phys(card->write.ccws); - /* Set initial state of the write channel. */ - card->read.state = LCS_CH_STATE_INIT; - - card->write.io_idx = 0; - card->write.buf_idx = 0; -} - -static void -lcs_setup_write(struct lcs_card *card) -{ - LCS_DBF_TEXT(3, setup, "initwrit"); - - lcs_setup_write_ccws(card); - /* Initialize write channel tasklet. */ - card->write.irq_tasklet.data = (unsigned long) &card->write; - card->write.irq_tasklet.func = lcs_tasklet; - /* Initialize waitqueue. 
*/ - init_waitqueue_head(&card->write.wait_q); -} - -static void -lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - card->thread_allowed_mask = threads; - spin_unlock_irqrestore(&card->mask_lock, flags); - wake_up(&card->wait_q); -} -static int lcs_threads_running(struct lcs_card *card, unsigned long threads) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - rc = (card->thread_running_mask & threads); - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -static int -lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) -{ - return wait_event_interruptible(card->wait_q, - lcs_threads_running(card, threads) == 0); -} - -static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - if ( !(card->thread_allowed_mask & thread) || - (card->thread_start_mask & thread) ) { - spin_unlock_irqrestore(&card->mask_lock, flags); - return -EPERM; - } - card->thread_start_mask |= thread; - spin_unlock_irqrestore(&card->mask_lock, flags); - return 0; -} - -static void -lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - card->thread_running_mask &= ~thread; - spin_unlock_irqrestore(&card->mask_lock, flags); - wake_up(&card->wait_q); -} - -static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - if (card->thread_start_mask & thread){ - if ((card->thread_allowed_mask & thread) && - !(card->thread_running_mask & thread)){ - rc = 1; - card->thread_start_mask &= ~thread; - card->thread_running_mask |= thread; - } else - rc = -EPERM; - } - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -static int -lcs_do_run_thread(struct lcs_card *card, unsigned long thread) -{ - int rc = 0; - wait_event(card->wait_q, - (rc = __lcs_do_run_thread(card, thread)) >= 0); - return rc; -} - -static int -lcs_do_start_thread(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", - (u8) card->thread_start_mask, - (u8) card->thread_allowed_mask, - (u8) card->thread_running_mask); - rc = (card->thread_start_mask & thread); - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -/* - * Initialize channels,card and state machines. - */ -static void -lcs_setup_card(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, setup, "initcard"); - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - - lcs_setup_read(card); - lcs_setup_write(card); - /* Set cards initial state. */ - card->state = DEV_STATE_DOWN; - card->tx_buffer = NULL; - card->tx_emitted = 0; - - init_waitqueue_head(&card->wait_q); - spin_lock_init(&card->lock); - spin_lock_init(&card->ipm_lock); - spin_lock_init(&card->mask_lock); -#ifdef CONFIG_IP_MULTICAST - INIT_LIST_HEAD(&card->ipm_list); -#endif - INIT_LIST_HEAD(&card->lancmd_waiters); -} - -static void lcs_clear_multicast_list(struct lcs_card *card) -{ -#ifdef CONFIG_IP_MULTICAST - struct lcs_ipm_list *ipm; - unsigned long flags; - - /* Free multicast list. 
*/ - LCS_DBF_TEXT(3, setup, "clmclist"); - spin_lock_irqsave(&card->ipm_lock, flags); - while (!list_empty(&card->ipm_list)){ - ipm = list_entry(card->ipm_list.next, - struct lcs_ipm_list, list); - list_del(&ipm->list); - if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){ - spin_unlock_irqrestore(&card->ipm_lock, flags); - lcs_send_delipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - } - kfree(ipm); - } - spin_unlock_irqrestore(&card->ipm_lock, flags); -#endif -} - -/* - * Cleanup channels,card and state machines. - */ -static void -lcs_cleanup_card(struct lcs_card *card) -{ - - LCS_DBF_TEXT(3, setup, "cleancrd"); - LCS_DBF_HEX(2,setup,&card,sizeof(void*)); - - if (card->dev != NULL) - free_netdev(card->dev); - /* Cleanup channels. */ - lcs_cleanup_channel(&card->write); - lcs_cleanup_channel(&card->read); -} - -/* - * Start channel. - */ -static int -lcs_start_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev)); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - channel->ccws + channel->io_idx, 0, 0, - DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND); - if (rc == 0) - channel->state = LCS_CH_STATE_RUNNING; - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4,trace,"essh%s", - dev_name(&channel->ccwdev->dev)); - dev_err(&channel->ccwdev->dev, - "Starting an LCS device resulted in an error," - " rc=%d!\n", rc); - } - return rc; -} - -static int -lcs_clear_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4,trace,"clearch"); - LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_clear(channel->ccwdev, 0); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ecsc%s", - dev_name(&channel->ccwdev->dev)); - return rc; - } - wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED)); - channel->state = LCS_CH_STATE_STOPPED; - return rc; -} - - -/* - * Stop channel. - */ -static int -lcs_stop_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - if (channel->state == LCS_CH_STATE_STOPPED) - return 0; - LCS_DBF_TEXT(4,trace,"haltsch"); - LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); - channel->state = LCS_CH_STATE_INIT; - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_halt(channel->ccwdev, 0); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ehsc%s", - dev_name(&channel->ccwdev->dev)); - return rc; - } - /* Asynchronous halt initialted. Wait for its completion. */ - wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED)); - lcs_clear_channel(channel); - return 0; -} - -/* - * start read and write channel - */ -static int -lcs_start_channels(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(2, trace, "chstart"); - /* start read channel */ - rc = lcs_start_channel(&card->read); - if (rc) - return rc; - /* start write channel */ - rc = lcs_start_channel(&card->write); - if (rc) - lcs_stop_channel(&card->read); - return rc; -} - -/* - * stop read and write channel - */ -static int -lcs_stop_channels(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, trace, "chhalt"); - lcs_stop_channel(&card->read); - lcs_stop_channel(&card->write); - return 0; -} - -/* - * Get empty buffer. 
- */ -static struct lcs_buffer * -__lcs_get_buffer(struct lcs_channel *channel) -{ - int index; - - LCS_DBF_TEXT(5, trace, "_getbuff"); - index = channel->io_idx; - do { - if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) { - channel->iob[index].state = LCS_BUF_STATE_LOCKED; - return channel->iob + index; - } - index = (index + 1) & (LCS_NUM_BUFFS - 1); - } while (index != channel->io_idx); - return NULL; -} - -static struct lcs_buffer * -lcs_get_buffer(struct lcs_channel *channel) -{ - struct lcs_buffer *buffer; - unsigned long flags; - - LCS_DBF_TEXT(5, trace, "getbuff"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer = __lcs_get_buffer(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - return buffer; -} - -/* - * Resume channel program if the channel is suspended. - */ -static int -__lcs_resume_channel(struct lcs_channel *channel) -{ - int rc; - - if (channel->state != LCS_CH_STATE_SUSPENDED) - return 0; - if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) - return 0; - LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev)); - rc = ccw_device_resume(channel->ccwdev); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ersc%s", - dev_name(&channel->ccwdev->dev)); - dev_err(&channel->ccwdev->dev, - "Sending data from the LCS device to the LAN failed" - " with rc=%d\n",rc); - } else - channel->state = LCS_CH_STATE_RUNNING; - return rc; - -} - -/* - * Make a buffer ready for processing. - */ -static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) -{ - int prev, next; - - LCS_DBF_TEXT(5, trace, "rdybits"); - prev = (index - 1) & (LCS_NUM_BUFFS - 1); - next = (index + 1) & (LCS_NUM_BUFFS - 1); - /* Check if we may clear the suspend bit of this buffer. */ - if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) { - /* Check if we have to set the PCI bit. */ - if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND)) - /* Suspend bit of the previous buffer is not set. */ - channel->ccws[index].flags |= CCW_FLAG_PCI; - /* Suspend bit of the next buffer is set. */ - channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND; - } -} - -static int -lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - unsigned long flags; - int index, rc; - - LCS_DBF_TEXT(5, trace, "rdybuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && - buffer->state != LCS_BUF_STATE_PROCESSED); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer->state = LCS_BUF_STATE_READY; - index = buffer - channel->iob; - /* Set length. */ - channel->ccws[index].count = buffer->count; - /* Check relevant PCI/suspend bits. */ - __lcs_ready_buffer_bits(channel, index); - rc = __lcs_resume_channel(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - return rc; -} - -/* - * Mark the buffer as processed. Take care of the suspend bit - * of the previous buffer. This function is called from - * interrupt context, so the lock must not be taken. - */ -static int -__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - int index, prev, next; - - LCS_DBF_TEXT(5, trace, "prcsbuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_READY); - buffer->state = LCS_BUF_STATE_PROCESSED; - index = buffer - channel->iob; - prev = (index - 1) & (LCS_NUM_BUFFS - 1); - next = (index + 1) & (LCS_NUM_BUFFS - 1); - /* Set the suspend bit and clear the PCI bit of this buffer. 
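The SUSPEND flag on a slot's CCW is the fence that stops the channel program before it runs into buffers the host still owns, while PCI requests an intermediate interrupt when the previous slot no longer suspends, since the channel might otherwise race past this slot unnoticed. A compilable restatement of __lcs_ready_buffer_bits() above, with a plain flag array standing in for the CCW chain (FLAG_SUSPEND/FLAG_PCI are stand-in values, not the real CCW flag bits):

#define NUM_BUFFS    32
#define FLAG_SUSPEND 0x1        // stand-in for CCW_FLAG_SUSPEND
#define FLAG_PCI     0x2        // stand-in for CCW_FLAG_PCI

static void ready_bits(unsigned int ccw_flags[NUM_BUFFS], int index)
{
        int prev = (index - 1) & (NUM_BUFFS - 1);
        int next = (index + 1) & (NUM_BUFFS - 1);

        // Drop this slot's fence only while the next slot still has
        // one, so the program always stops before unowned buffers.
        if (ccw_flags[next] & FLAG_SUSPEND) {
                if (!(ccw_flags[prev] & FLAG_SUSPEND))
                        ccw_flags[index] |= FLAG_PCI;
                ccw_flags[index] &= ~FLAG_SUSPEND;
        }
}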
*/ - channel->ccws[index].flags |= CCW_FLAG_SUSPEND; - channel->ccws[index].flags &= ~CCW_FLAG_PCI; - /* Check the suspend bit of the previous buffer. */ - if (channel->iob[prev].state == LCS_BUF_STATE_READY) { - /* - * Previous buffer is in state ready. It might have - * happened in lcs_ready_buffer that the suspend bit - * has not been cleared to avoid an endless loop. - * Do it now. - */ - __lcs_ready_buffer_bits(channel, prev); - } - /* Clear PCI bit of next buffer. */ - channel->ccws[next].flags &= ~CCW_FLAG_PCI; - return __lcs_resume_channel(channel); -} - -/* - * Put a processed buffer back to state empty. - */ -static void -lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - unsigned long flags; - - LCS_DBF_TEXT(5, trace, "relbuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && - buffer->state != LCS_BUF_STATE_PROCESSED); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer->state = LCS_BUF_STATE_EMPTY; - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); -} - -/* - * Get buffer for a lan command. - */ -static struct lcs_buffer * -lcs_get_lancmd(struct lcs_card *card, int count) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(4, trace, "getlncmd"); - /* Get buffer and wait if none is available. */ - wait_event(card->write.wait_q, - ((buffer = lcs_get_buffer(&card->write)) != NULL)); - count += sizeof(struct lcs_header); - *(__u16 *)(buffer->data + count) = 0; - buffer->count = count + sizeof(__u16); - buffer->callback = lcs_release_buffer; - cmd = (struct lcs_cmd *) buffer->data; - cmd->offset = count; - cmd->type = LCS_FRAME_TYPE_CONTROL; - cmd->slot = 0; - return buffer; -} - - -static void -lcs_get_reply(struct lcs_reply *reply) -{ - refcount_inc(&reply->refcnt); -} - -static void -lcs_put_reply(struct lcs_reply *reply) -{ - if (refcount_dec_and_test(&reply->refcnt)) - kfree(reply); -} - -static struct lcs_reply * -lcs_alloc_reply(struct lcs_cmd *cmd) -{ - struct lcs_reply *reply; - - LCS_DBF_TEXT(4, trace, "getreply"); - - reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); - if (!reply) - return NULL; - refcount_set(&reply->refcnt, 1); - reply->sequence_no = cmd->sequence_no; - reply->received = 0; - reply->rc = 0; - init_waitqueue_head(&reply->wait_q); - - return reply; -} - -/* - * Notifier function for lancmd replies. Called from read irq. - */ -static void -lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) -{ - struct list_head *l, *n; - struct lcs_reply *reply; - - LCS_DBF_TEXT(4, trace, "notiwait"); - spin_lock(&card->lock); - list_for_each_safe(l, n, &card->lancmd_waiters) { - reply = list_entry(l, struct lcs_reply, list); - if (reply->sequence_no == cmd->sequence_no) { - lcs_get_reply(reply); - list_del_init(&reply->list); - if (reply->callback != NULL) - reply->callback(card, cmd); - reply->received = 1; - reply->rc = cmd->return_code; - wake_up(&reply->wait_q); - lcs_put_reply(reply); - break; - } - } - spin_unlock(&card->lock); -} - -/* - * Emit buffer of a lan command. 
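The functions below implement the lancmd round trip: every command is stamped with a sequence number, a reply descriptor is queued on lancmd_waiters, the read-side IRQ wakes whichever waiter matches an incoming control frame's sequence number, and a timer fails the wait with -ETIME after lancmd_timeout seconds (default 5). A single-threaded userspace model of just the matching; all names here are illustrative, and the flag assignments stand in for the driver's wait_event()/wake_up() and mod_timer() machinery:

struct pending { unsigned short seq; int received; int rc; };

#define MAX_PENDING 8
static struct pending waiters[MAX_PENDING];
static unsigned short next_seq;

static struct pending *send_cmd(void)
{
        struct pending *p = &waiters[next_seq % MAX_PENDING];

        p->seq = next_seq++;
        p->received = 0;
        p->rc = 0;
        // driver: queue on lancmd_waiters, ready the write buffer,
        // then mod_timer(&reply->timer, jiffies + HZ * lancmd_timeout)
        return p;
}

// One call per incoming control frame (the timeout path does the
// same with rc = -ETIME); unmatched sequence numbers fall through.
static void on_reply(unsigned short seq, int rc)
{
        int i;

        for (i = 0; i < MAX_PENDING; i++) {
                if (!waiters[i].received && waiters[i].seq == seq) {
                        waiters[i].rc = rc;
                        waiters[i].received = 1;  // driver: wake_up()
                        return;
                }
        }
}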
- */ -static void -lcs_lancmd_timeout(struct timer_list *t) -{ - struct lcs_reply *reply = from_timer(reply, t, timer); - struct lcs_reply *list_reply, *r; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "timeout"); - spin_lock_irqsave(&reply->card->lock, flags); - list_for_each_entry_safe(list_reply, r, - &reply->card->lancmd_waiters,list) { - if (reply == list_reply) { - lcs_get_reply(reply); - list_del_init(&reply->list); - spin_unlock_irqrestore(&reply->card->lock, flags); - reply->received = 1; - reply->rc = -ETIME; - wake_up(&reply->wait_q); - lcs_put_reply(reply); - return; - } - } - spin_unlock_irqrestore(&reply->card->lock, flags); -} - -static int -lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, - void (*reply_callback)(struct lcs_card *, struct lcs_cmd *)) -{ - struct lcs_reply *reply; - struct lcs_cmd *cmd; - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4, trace, "sendcmd"); - cmd = (struct lcs_cmd *) buffer->data; - cmd->return_code = 0; - cmd->sequence_no = card->sequence_no++; - reply = lcs_alloc_reply(cmd); - if (!reply) - return -ENOMEM; - reply->callback = reply_callback; - reply->card = card; - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->lancmd_waiters); - spin_unlock_irqrestore(&card->lock, flags); - - buffer->callback = lcs_release_buffer; - rc = lcs_ready_buffer(&card->write, buffer); - if (rc) - return rc; - timer_setup(&reply->timer, lcs_lancmd_timeout, 0); - mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); - wait_event(reply->wait_q, reply->received); - del_timer_sync(&reply->timer); - LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); - rc = reply->rc; - lcs_put_reply(reply); - return rc ? -EIO : 0; -} - -/* - * LCS startup command - */ -static int -lcs_send_startup(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "startup"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STARTUP; - cmd->initiator = initiator; - cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * LCS shutdown command - */ -static int -lcs_send_shutdown(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "shutdown"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_SHUTDOWN; - cmd->initiator = LCS_INITIATOR_TCPIP; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * LCS lanstat command - */ -static void -__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "statcb"); - memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); -} - -static int -lcs_send_lanstat(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2,trace, "cmdstat"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - /* Setup lanstat command. 
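Like every control command, it is assembled in place at the front of a freshly claimed write buffer. A packed userspace mirror of the fixed command prefix, with the field layout taken from struct lcs_cmd in lcs.h further below; cmd_hdr and build_lanstat() are illustrative names, not driver API:

#include <stdint.h>
#include <string.h>

struct __attribute__((packed)) cmd_hdr {
        uint16_t offset;        // offset of the next frame, 0 ends the chain
        uint8_t  type;          // LCS_FRAME_TYPE_CONTROL == 0
        uint8_t  slot;
        uint8_t  cmd_code;      // e.g. LCS_CMD_LANSTAT == 0x04
        uint8_t  initiator;     // LCS_INITIATOR_TCPIP == 0x00
        uint16_t sequence_no;
        uint16_t return_code;
};

static void build_lanstat(uint8_t *buf, uint8_t lan_type, uint8_t portno)
{
        struct cmd_hdr h = {0};

        h.cmd_code = 0x04;              // LCS_CMD_LANSTAT
        memcpy(buf, &h, sizeof(h));
        buf[sizeof(h)] = lan_type;      // lcs_std_cmd.lan_type
        buf[sizeof(h) + 1] = portno;    // lcs_std_cmd.portno
}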
*/ - cmd->cmd_code = LCS_CMD_LANSTAT; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); -} - -/* - * send stoplan command - */ -static int -lcs_send_stoplan(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdstpln"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STOPLAN; - cmd->initiator = initiator; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * send startlan command - */ -static void -__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "srtlancb"); - card->lan_type = cmd->cmd.lcs_std_cmd.lan_type; - card->portno = cmd->cmd.lcs_std_cmd.portno; -} - -static int -lcs_send_startlan(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdstaln"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STARTLAN; - cmd->initiator = initiator; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb); -} - -#ifdef CONFIG_IP_MULTICAST -/* - * send setipm command (Multicast) - */ -static int -lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdsetim"); - buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_SETIPM; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, - &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); - LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * send delipm command (Multicast) - */ -static int -lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmddelim"); - buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_DELIPM; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, - &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); - LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * check if multicast is supported by LCS - */ -static void -__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "chkmccb"); - card->ip_assists_supported = - cmd->cmd.lcs_qipassist.ip_assists_supported; - card->ip_assists_enabled = - cmd->cmd.lcs_qipassist.ip_assists_enabled; -} - -static int -lcs_check_multicast_support(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; 
- int rc; - - LCS_DBF_TEXT(2, trace, "cmdqipa"); - /* Send query ipassist. */ - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_QIPASSIST; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); - if (rc != 0) { - pr_err("Query IPAssist failed. Assuming unsupported!\n"); - return -EOPNOTSUPP; - } - if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) - return 0; - return -EOPNOTSUPP; -} - -/* - * set or del multicast address on LCS card - */ -static void -lcs_fix_multicast_list(struct lcs_card *card) -{ - struct list_head failed_list; - struct lcs_ipm_list *ipm, *tmp; - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4,trace, "fixipm"); - INIT_LIST_HEAD(&failed_list); - spin_lock_irqsave(&card->ipm_lock, flags); -list_modified: - list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ - switch (ipm->ipm_state) { - case LCS_IPM_STATE_SET_REQUIRED: - /* del from ipm_list so no one else can tamper with - * this entry */ - list_del_init(&ipm->list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - rc = lcs_send_setipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - if (rc) { - pr_info("Adding multicast address failed." - " Table possibly full!\n"); - /* store ipm in failed list -> will be added - * to ipm_list again, so a retry will be done - * during the next call of this function */ - list_add_tail(&ipm->list, &failed_list); - } else { - ipm->ipm_state = LCS_IPM_STATE_ON_CARD; - /* re-insert into ipm_list */ - list_add_tail(&ipm->list, &card->ipm_list); - } - goto list_modified; - case LCS_IPM_STATE_DEL_REQUIRED: - list_del(&ipm->list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - lcs_send_delipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - kfree(ipm); - goto list_modified; - case LCS_IPM_STATE_ON_CARD: - break; - } - } - /* re-insert all entries from the failed_list into ipm_list */ - list_for_each_entry_safe(ipm, tmp, &failed_list, list) - list_move_tail(&ipm->list, &card->ipm_list); - - spin_unlock_irqrestore(&card->ipm_lock, flags); -} - -/* - * get mac address for the relevant Multicast address - */ -static void -lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) -{ - LCS_DBF_TEXT(4,trace, "getmac"); - ip_eth_mc_map(ipm, mac); -} - -/* - * function called by net device to handle multicast address relevant things - */ -static void lcs_remove_mc_addresses(struct lcs_card *card, - struct in_device *in4_dev) -{ - struct ip_mc_list *im4; - struct list_head *l; - struct lcs_ipm_list *ipm; - unsigned long flags; - char buf[MAX_ADDR_LEN]; - - LCS_DBF_TEXT(4, trace, "remmclst"); - spin_lock_irqsave(&card->ipm_lock, flags); - list_for_each(l, &card->ipm_list) { - ipm = list_entry(l, struct lcs_ipm_list, list); - for (im4 = rcu_dereference(in4_dev->mc_list); - im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { - lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); - if ( (ipm->ipm.ip_addr == im4->multiaddr) && - (memcmp(buf, &ipm->ipm.mac_addr, - LCS_MAC_LENGTH) == 0) ) - break; - } - if (im4 == NULL) - ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; - } - spin_unlock_irqrestore(&card->ipm_lock, flags); -} - -static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, - struct ip_mc_list *im4, - char *buf) -{ - struct lcs_ipm_list 
*tmp, *ipm = NULL; - struct list_head *l; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "chkmcent"); - spin_lock_irqsave(&card->ipm_lock, flags); - list_for_each(l, &card->ipm_list) { - tmp = list_entry(l, struct lcs_ipm_list, list); - if ( (tmp->ipm.ip_addr == im4->multiaddr) && - (memcmp(buf, &tmp->ipm.mac_addr, - LCS_MAC_LENGTH) == 0) ) { - ipm = tmp; - break; - } - } - spin_unlock_irqrestore(&card->ipm_lock, flags); - return ipm; -} - -static void lcs_set_mc_addresses(struct lcs_card *card, - struct in_device *in4_dev) -{ - - struct ip_mc_list *im4; - struct lcs_ipm_list *ipm; - char buf[MAX_ADDR_LEN]; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "setmclst"); - for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; - im4 = rcu_dereference(im4->next_rcu)) { - lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); - ipm = lcs_check_addr_entry(card, im4, buf); - if (ipm != NULL) - continue; /* Address already in list. */ - ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); - if (ipm == NULL) { - pr_info("Not enough memory to add" - " new multicast entry!\n"); - break; - } - memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); - ipm->ipm.ip_addr = im4->multiaddr; - ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; - spin_lock_irqsave(&card->ipm_lock, flags); - LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); - list_add(&ipm->list, &card->ipm_list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - } -} - -static int -lcs_register_mc_addresses(void *data) -{ - struct lcs_card *card; - struct in_device *in4_dev; - - card = (struct lcs_card *) data; - - if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) - return 0; - LCS_DBF_TEXT(4, trace, "regmulti"); - - in4_dev = in_dev_get(card->dev); - if (in4_dev == NULL) - goto out; - rcu_read_lock(); - lcs_remove_mc_addresses(card,in4_dev); - lcs_set_mc_addresses(card, in4_dev); - rcu_read_unlock(); - in_dev_put(in4_dev); - - netif_carrier_off(card->dev); - netif_tx_disable(card->dev); - wait_event(card->write.wait_q, - (card->write.state != LCS_CH_STATE_RUNNING)); - lcs_fix_multicast_list(card); - if (card->state == DEV_STATE_UP) { - netif_carrier_on(card->dev); - netif_wake_queue(card->dev); - } -out: - lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); - return 0; -} -#endif /* CONFIG_IP_MULTICAST */ - -/* - * function called by net device to - * handle multicast address relevant things - */ -static void -lcs_set_multicast_list(struct net_device *dev) -{ -#ifdef CONFIG_IP_MULTICAST - struct lcs_card *card; - - LCS_DBF_TEXT(4, trace, "setmulti"); - card = (struct lcs_card *) dev->ml_priv; - - if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) - schedule_work(&card->kernel_thread_starter); -#endif /* CONFIG_IP_MULTICAST */ -} - -static long -lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) -{ - if (!IS_ERR(irb)) - return 0; - - switch (PTR_ERR(irb)) { - case -EIO: - dev_warn(&cdev->dev, - "An I/O-error occurred on the LCS device\n"); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); - break; - case -ETIMEDOUT: - dev_warn(&cdev->dev, - "A command timed out on the LCS device\n"); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); - break; - default: - dev_warn(&cdev->dev, - "An error occurred on the LCS device, rc=%ld\n", - PTR_ERR(irb)); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT(2, trace, " rc???"); - } - return PTR_ERR(irb); -} - -static int -lcs_get_problem(struct ccw_device *cdev, struct irb *irb) -{ - int dstat, cstat; - char *sense; - - sense = (char 
*) irb->ecw; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - - if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | - SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | - SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { - LCS_DBF_TEXT(2, trace, "CGENCHK"); - return 1; - } - if (dstat & DEV_STAT_UNIT_CHECK) { - if (sense[LCS_SENSE_BYTE_1] & - LCS_SENSE_RESETTING_EVENT) { - LCS_DBF_TEXT(2, trace, "REVIND"); - return 1; - } - if (sense[LCS_SENSE_BYTE_0] & - LCS_SENSE_CMD_REJECT) { - LCS_DBF_TEXT(2, trace, "CMDREJ"); - return 0; - } - if ((!sense[LCS_SENSE_BYTE_0]) && - (!sense[LCS_SENSE_BYTE_1]) && - (!sense[LCS_SENSE_BYTE_2]) && - (!sense[LCS_SENSE_BYTE_3])) { - LCS_DBF_TEXT(2, trace, "ZEROSEN"); - return 0; - } - LCS_DBF_TEXT(2, trace, "DGENCHK"); - return 1; - } - return 0; -} - -static void -lcs_schedule_recovery(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, trace, "startrec"); - if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD)) - schedule_work(&card->kernel_thread_starter); -} - -/* - * IRQ Handler for LCS channels - */ -static void -lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct lcs_card *card; - struct lcs_channel *channel; - int rc, index; - int cstat, dstat; - - if (lcs_check_irb_error(cdev, irb)) - return; - - card = CARD_FROM_DEV(cdev); - if (card->read.ccwdev == cdev) - channel = &card->read; - else - channel = &card->write; - - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev)); - LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); - LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, - irb->scsw.cmd.actl); - - /* Check for channel and device errors presented */ - rc = lcs_get_problem(cdev, irb); - if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { - dev_warn(&cdev->dev, - "The LCS device stopped because of an error," - " dstat=0x%X, cstat=0x%X \n", - dstat, cstat); - if (rc) { - channel->state = LCS_CH_STATE_ERROR; - } - } - if (channel->state == LCS_CH_STATE_ERROR) { - lcs_schedule_recovery(card); - wake_up(&card->wait_q); - return; - } - /* How far in the ccw chain have we processed? */ - if ((channel->state != LCS_CH_STATE_INIT) && - (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && - (irb->scsw.cmd.cpa != 0)) { - index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) - - channel->ccws; - if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || - (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) - /* Bloody io subsystem tells us lies about cpa... */ - index = (index - 1) & (LCS_NUM_BUFFS - 1); - while (channel->io_idx != index) { - __lcs_processed_buffer(channel, - channel->iob + channel->io_idx); - channel->io_idx = - (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1); - } - } - - if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || - (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || - (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) - /* Mark channel as stopped. */ - channel->state = LCS_CH_STATE_STOPPED; - else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) - /* CCW execution stopped on a suspend bit. */ - channel->state = LCS_CH_STATE_SUSPENDED; - if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { - if (irb->scsw.cmd.cc != 0) { - ccw_device_halt(channel->ccwdev, 0); - return; - } - /* The channel has been stopped by halt_IO. */ - channel->state = LCS_CH_STATE_HALTED; - } - if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) - channel->state = LCS_CH_STATE_CLEARED; - /* Do the rest in the tasklet. 
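The hard-IRQ half above only classifies the interrupt, advances io_idx past buffers the channel program has finished, and updates channel->state; running the buffer callbacks and restarting or resuming the channel is deferred to lcs_tasklet() below. A compilable userspace model of that split (chan_model, top_half() and bottom_half() are hypothetical stand-ins, and the pending flag models tasklet_schedule()):

struct chan_model {
        unsigned int dstat;       // last device status, recorded by top half
        int tasklet_pending;      // models the scheduled tasklet
};

static void top_half(struct chan_model *ch, unsigned int dstat)
{
        ch->dstat = dstat;        // record only, no callbacks here
        ch->tasklet_pending = 1;  // defer the heavy work
}

static void bottom_half(struct chan_model *ch)
{
        if (!ch->tasklet_pending)
                return;
        ch->tasklet_pending = 0;
        // here the driver runs each processed buffer's callback and,
        // depending on channel state, restarts or resumes the channel
}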
*/ - tasklet_schedule(&channel->irq_tasklet); -} - -/* - * Tasklet for IRQ handler - */ -static void -lcs_tasklet(unsigned long data) -{ - unsigned long flags; - struct lcs_channel *channel; - struct lcs_buffer *iob; - int buf_idx; - - channel = (struct lcs_channel *) data; - LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); - - /* Check for processed buffers. */ - iob = channel->iob; - buf_idx = channel->buf_idx; - while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) { - /* Do the callback thing. */ - if (iob[buf_idx].callback != NULL) - iob[buf_idx].callback(channel, iob + buf_idx); - buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1); - } - channel->buf_idx = buf_idx; - - if (channel->state == LCS_CH_STATE_STOPPED) - lcs_start_channel(channel); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - if (channel->state == LCS_CH_STATE_SUSPENDED && - channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) - __lcs_resume_channel(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - - /* Something happened on the channel. Wake up waiters. */ - wake_up(&channel->wait_q); -} - -/* - * Finish current tx buffer and make it ready for transmit. - */ -static void -__lcs_emit_txbuffer(struct lcs_card *card) -{ - LCS_DBF_TEXT(5, trace, "emittx"); - *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0; - card->tx_buffer->count += 2; - lcs_ready_buffer(&card->write, card->tx_buffer); - card->tx_buffer = NULL; - card->tx_emitted++; -} - -/* - * Callback for finished tx buffers. - */ -static void -lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(5, trace, "txbuffcb"); - /* Put buffer back to pool. */ - lcs_release_buffer(channel, buffer); - card = container_of(channel, struct lcs_card, write); - if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev)) - netif_wake_queue(card->dev); - spin_lock(&card->lock); - card->tx_emitted--; - if (card->tx_emitted <= 0 && card->tx_buffer != NULL) - /* - * Last running tx buffer has finished. Submit partially - * filled current buffer. - */ - __lcs_emit_txbuffer(card); - spin_unlock(&card->lock); -} - -/* - * Packet transmit function called by network stack - */ -static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, - struct net_device *dev) -{ - struct lcs_header *header; - int rc = NETDEV_TX_OK; - - LCS_DBF_TEXT(5, trace, "hardxmit"); - if (skb == NULL) { - card->stats.tx_dropped++; - card->stats.tx_errors++; - return NETDEV_TX_OK; - } - if (card->state != DEV_STATE_UP) { - dev_kfree_skb(skb); - card->stats.tx_dropped++; - card->stats.tx_errors++; - card->stats.tx_carrier_errors++; - return NETDEV_TX_OK; - } - if (skb->protocol == htons(ETH_P_IPV6)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - netif_stop_queue(card->dev); - spin_lock(&card->lock); - if (card->tx_buffer != NULL && - card->tx_buffer->count + sizeof(struct lcs_header) + - skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) - /* skb too big for current tx buffer. 
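Outgoing frames are packed back to back into a single 0x5000-byte write buffer, so a frame only fits while its 4-byte lcs_header, its payload and the trailing zero u16 that terminates the chain all stay within LCS_IOBUFFERSIZE. The check from above as a standalone predicate; constants copied from lcs.h further below, frame_fits() is an illustrative name:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define IOBUFFERSIZE 0x5000     // LCS_IOBUFFERSIZE
#define HDRLEN       4          // sizeof(struct lcs_header), packed

static bool frame_fits(size_t used, size_t skb_len)
{
        // mirrors: count + sizeof(header) + skb->len + sizeof(u16)
        //          must not exceed LCS_IOBUFFERSIZE
        return used + HDRLEN + skb_len + sizeof(uint16_t) <= IOBUFFERSIZE;
}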
*/ - __lcs_emit_txbuffer(card); - if (card->tx_buffer == NULL) { - /* Get new tx buffer */ - card->tx_buffer = lcs_get_buffer(&card->write); - if (card->tx_buffer == NULL) { - card->stats.tx_dropped++; - rc = NETDEV_TX_BUSY; - goto out; - } - card->tx_buffer->callback = lcs_txbuffer_cb; - card->tx_buffer->count = 0; - } - header = (struct lcs_header *) - (card->tx_buffer->data + card->tx_buffer->count); - card->tx_buffer->count += skb->len + sizeof(struct lcs_header); - header->offset = card->tx_buffer->count; - header->type = card->lan_type; - header->slot = card->portno; - skb_copy_from_linear_data(skb, header + 1, skb->len); - spin_unlock(&card->lock); - card->stats.tx_bytes += skb->len; - card->stats.tx_packets++; - dev_kfree_skb(skb); - netif_wake_queue(card->dev); - spin_lock(&card->lock); - if (card->tx_emitted <= 0 && card->tx_buffer != NULL) - /* If this is the first tx buffer emit it immediately. */ - __lcs_emit_txbuffer(card); -out: - spin_unlock(&card->lock); - return rc; -} - -static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(5, trace, "pktxmit"); - card = (struct lcs_card *) dev->ml_priv; - rc = __lcs_start_xmit(card, skb, dev); - return rc; -} - -/* - * send startlan and lanstat command to make LCS device ready - */ -static int -lcs_startlan_auto(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(2, trace, "strtauto"); - card->lan_type = LCS_FRAME_TYPE_ENET; - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - return 0; - - return -EIO; -} - -static int -lcs_startlan(struct lcs_card *card) -{ - int rc, i; - - LCS_DBF_TEXT(2, trace, "startlan"); - rc = 0; - if (card->portno != LCS_INVALID_PORT_NO) { - if (card->lan_type == LCS_FRAME_TYPE_AUTO) - rc = lcs_startlan_auto(card); - else - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - } else { - for (i = 0; i <= 16; i++) { - card->portno = i; - if (card->lan_type != LCS_FRAME_TYPE_AUTO) - rc = lcs_send_startlan(card, - LCS_INITIATOR_TCPIP); - else - /* autodetecting lan type */ - rc = lcs_startlan_auto(card); - if (rc == 0) - break; - } - } - if (rc == 0) - return lcs_send_lanstat(card); - return rc; -} - -/* - * LCS detect function - * setup channels and make them I/O ready - */ -static int -lcs_detect(struct lcs_card *card) -{ - int rc = 0; - - LCS_DBF_TEXT(2, setup, "lcsdetct"); - /* start/reset card */ - if (card->dev) - netif_stop_queue(card->dev); - rc = lcs_stop_channels(card); - if (rc == 0) { - rc = lcs_start_channels(card); - if (rc == 0) { - rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - rc = lcs_startlan(card); - } - } - if (rc == 0) { - card->state = DEV_STATE_UP; - } else { - card->state = DEV_STATE_DOWN; - card->write.state = LCS_CH_STATE_INIT; - card->read.state = LCS_CH_STATE_INIT; - } - return rc; -} - -/* - * LCS Stop card - */ -static int -lcs_stopcard(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(3, setup, "stopcard"); - - if (card->read.state != LCS_CH_STATE_STOPPED && - card->write.state != LCS_CH_STATE_STOPPED && - card->read.state != LCS_CH_STATE_ERROR && - card->write.state != LCS_CH_STATE_ERROR && - card->state == DEV_STATE_UP) { - lcs_clear_multicast_list(card); - rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); - rc = lcs_send_shutdown(card); - } - rc = lcs_stop_channels(card); - card->state = DEV_STATE_DOWN; - - return rc; -} - -/* - * Kernel Thread helper functions for LGW initiated commands - */ -static void -lcs_start_kernel_thread(struct work_struct *work) -{ - 
struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); - LCS_DBF_TEXT(5, trace, "krnthrd"); - if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) - kthread_run(lcs_recovery, card, "lcs_recover"); -#ifdef CONFIG_IP_MULTICAST - if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) - kthread_run(lcs_register_mc_addresses, card, "regipm"); -#endif -} - -/* - * Process control frames. - */ -static void -lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(5, trace, "getctrl"); - if (cmd->initiator == LCS_INITIATOR_LGW) { - switch(cmd->cmd_code) { - case LCS_CMD_STARTUP: - case LCS_CMD_STARTLAN: - lcs_schedule_recovery(card); - break; - case LCS_CMD_STOPLAN: - if (card->dev) { - pr_warn("Stoplan for %s initiated by LGW\n", - card->dev->name); - netif_carrier_off(card->dev); - } - break; - default: - LCS_DBF_TEXT(5, trace, "noLGWcmd"); - break; - } - } else - lcs_notify_lancmd_waiters(card, cmd); -} - -/* - * Unpack network packet. - */ -static void -lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) -{ - struct sk_buff *skb; - - LCS_DBF_TEXT(5, trace, "getskb"); - if (card->dev == NULL || - card->state != DEV_STATE_UP) - /* The card isn't up. Ignore the packet. */ - return; - - skb = dev_alloc_skb(skb_len); - if (skb == NULL) { - dev_err(&card->dev->dev, - " Allocating a socket buffer to interface %s failed\n", - card->dev->name); - card->stats.rx_dropped++; - return; - } - skb_put_data(skb, skb_data, skb_len); - skb->protocol = card->lan_type_trans(skb, card->dev); - card->stats.rx_bytes += skb_len; - card->stats.rx_packets++; - if (skb->protocol == htons(ETH_P_802_2)) - *((__u32 *)skb->cb) = ++card->pkt_seq; - netif_rx(skb); -} - -/* - * LCS main routine to get packets and lancmd replies from the buffers - */ -static void -lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - struct lcs_card *card; - struct lcs_header *lcs_hdr; - __u16 offset; - - LCS_DBF_TEXT(5, trace, "lcsgtpkt"); - lcs_hdr = (struct lcs_header *) buffer->data; - if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) { - LCS_DBF_TEXT(4, trace, "-eiogpkt"); - return; - } - card = container_of(channel, struct lcs_card, read); - offset = 0; - while (lcs_hdr->offset != 0) { - if (lcs_hdr->offset <= 0 || - lcs_hdr->offset > LCS_IOBUFFERSIZE || - lcs_hdr->offset < offset) { - /* Offset invalid. */ - card->stats.rx_length_errors++; - card->stats.rx_errors++; - return; - } - if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) - lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); - else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET) - lcs_get_skb(card, (char *)(lcs_hdr + 1), - lcs_hdr->offset - offset - - sizeof(struct lcs_header)); - else - dev_info_once(&card->dev->dev, - "Unknown frame type %d\n", - lcs_hdr->type); - offset = lcs_hdr->offset; - lcs_hdr->offset = LCS_ILLEGAL_OFFSET; - lcs_hdr = (struct lcs_header *) (buffer->data + offset); - } - /* The buffer is now empty. Make it ready again. 
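Every frame in it has been consumed: the loop above follows each header's offset field, which holds the offset of the next frame from the start of the buffer, until it reads zero, rejects offsets that run backwards or past the buffer, and overwrites each visited offset with LCS_ILLEGAL_OFFSET so a stale buffer is never parsed twice. The walk as a compilable sketch; the bounds check is slightly stricter than the driver's so the header read itself stays in range, and walk_frames() is an illustrative name:

#include <stdint.h>
#include <string.h>

struct frame_hdr {      // mirrors the packed struct lcs_header (4 bytes)
        uint16_t offset;
        uint8_t  type;
        uint8_t  slot;
};

#define BUFSZ 0x5000    // LCS_IOBUFFERSIZE

static int walk_frames(const uint8_t *data)
{
        struct frame_hdr hdr;
        uint16_t offset = 0;

        memcpy(&hdr, data, sizeof(hdr));
        while (hdr.offset != 0) {
                if (hdr.offset <= offset || hdr.offset > BUFSZ - sizeof(hdr))
                        return -1;      // corrupt chain, stop parsing
                // this frame's payload starts at data + offset + sizeof(hdr)
                // and is hdr.offset - offset - sizeof(hdr) bytes long;
                // dispatch on hdr.type (control frame vs. ethernet frame)
                offset = hdr.offset;
                memcpy(&hdr, data + offset, sizeof(hdr));
        }
        return 0;
}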
*/ - lcs_ready_buffer(&card->read, buffer); -} - -/* - * get network statistics for ifconfig and other user programs - */ -static struct net_device_stats * -lcs_getstats(struct net_device *dev) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(4, trace, "netstats"); - card = (struct lcs_card *) dev->ml_priv; - return &card->stats; -} - -/* - * stop lcs device - * This function will be called by user doing ifconfig xxx down - */ -static int -lcs_stop_device(struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, trace, "stopdev"); - card = (struct lcs_card *) dev->ml_priv; - netif_carrier_off(dev); - netif_tx_disable(dev); - dev->flags &= ~IFF_UP; - wait_event(card->write.wait_q, - (card->write.state != LCS_CH_STATE_RUNNING)); - rc = lcs_stopcard(card); - if (rc) - dev_err(&card->dev->dev, - " Shutting down the LCS device failed\n"); - return rc; -} - -/* - * start lcs device and make it runnable - * This function will be called by user doing ifconfig xxx up - */ -static int -lcs_open_device(struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, trace, "opendev"); - card = (struct lcs_card *) dev->ml_priv; - /* initialize statistics */ - rc = lcs_detect(card); - if (rc) { - pr_err("Error in opening device!\n"); - - } else { - dev->flags |= IFF_UP; - netif_carrier_on(dev); - netif_wake_queue(dev); - card->state = DEV_STATE_UP; - } - return rc; -} - -/* - * show function for portno called by cat or similar things - */ -static ssize_t -lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf) -{ - struct lcs_card *card; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - return sysfs_emit(buf, "%d\n", card->portno); -} - -/* - * store the value which is piped to file portno - */ -static ssize_t -lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct lcs_card *card; - int rc; - s16 value; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - rc = kstrtos16(buf, 0, &value); - if (rc) - return -EINVAL; - /* TODO: sanity checks */ - card->portno = value; - if (card->dev) - card->dev->dev_port = card->portno; - - return count; - -} - -static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); - -static const char *lcs_type[] = { - "not a channel", - "2216 parallel", - "2216 channel", - "OSA LCS card", - "unknown channel type", - "unsupported channel type", -}; - -static ssize_t -lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct ccwgroup_device *cgdev; - - cgdev = to_ccwgroupdev(dev); - if (!cgdev) - return -ENODEV; - - return sysfs_emit(buf, "%s\n", - lcs_type[cgdev->cdev[0]->id.driver_info]); -} - -static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); - -static ssize_t -lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct lcs_card *card; - - card = dev_get_drvdata(dev); - - return card ? 
sysfs_emit(buf, "%u\n", card->lancmd_timeout) : 0; -} - -static ssize_t -lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct lcs_card *card; - unsigned int value; - int rc; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - rc = kstrtouint(buf, 0, &value); - if (rc) - return -EINVAL; - /* TODO: sanity checks */ - card->lancmd_timeout = value; - - return count; - -} - -static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); - -static ssize_t -lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct lcs_card *card = dev_get_drvdata(dev); - char *tmp; - int i; - - if (!card) - return -EINVAL; - if (card->state != DEV_STATE_UP) - return -EPERM; - i = simple_strtoul(buf, &tmp, 16); - if (i == 1) - lcs_schedule_recovery(card); - return count; -} - -static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); - -static struct attribute * lcs_attrs[] = { - &dev_attr_portno.attr, - &dev_attr_type.attr, - &dev_attr_lancmd_timeout.attr, - &dev_attr_recover.attr, - NULL, -}; -static struct attribute_group lcs_attr_group = { - .attrs = lcs_attrs, -}; -static const struct attribute_group *lcs_attr_groups[] = { - &lcs_attr_group, - NULL, -}; -static const struct device_type lcs_devtype = { - .name = "lcs", - .groups = lcs_attr_groups, -}; - -/* - * lcs_probe_device is called on establishing a new ccwgroup_device. - */ -static int -lcs_probe_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - if (!get_device(&ccwgdev->dev)) - return -ENODEV; - - LCS_DBF_TEXT(2, setup, "add_dev"); - card = lcs_alloc_card(); - if (!card) { - LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); - put_device(&ccwgdev->dev); - return -ENOMEM; - } - dev_set_drvdata(&ccwgdev->dev, card); - ccwgdev->cdev[0]->handler = lcs_irq; - ccwgdev->cdev[1]->handler = lcs_irq; - card->gdev = ccwgdev; - INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); - card->thread_start_mask = 0; - card->thread_allowed_mask = 0; - card->thread_running_mask = 0; - ccwgdev->dev.type = &lcs_devtype; - - return 0; -} - -static int -lcs_register_netdev(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(2, setup, "regnetdv"); - card = dev_get_drvdata(&ccwgdev->dev); - if (card->dev->reg_state != NETREG_UNINITIALIZED) - return 0; - SET_NETDEV_DEV(card->dev, &ccwgdev->dev); - return register_netdev(card->dev); -} - -/* - * lcs_new_device will be called by setting the group device online. 
- */ -static const struct net_device_ops lcs_netdev_ops = { - .ndo_open = lcs_open_device, - .ndo_stop = lcs_stop_device, - .ndo_get_stats = lcs_getstats, - .ndo_start_xmit = lcs_start_xmit, -}; - -static const struct net_device_ops lcs_mc_netdev_ops = { - .ndo_open = lcs_open_device, - .ndo_stop = lcs_stop_device, - .ndo_get_stats = lcs_getstats, - .ndo_start_xmit = lcs_start_xmit, - .ndo_set_rx_mode = lcs_set_multicast_list, -}; - -static int -lcs_new_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - struct net_device *dev=NULL; - enum lcs_dev_states recover_state; - int rc; - - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return -ENODEV; - - LCS_DBF_TEXT(2, setup, "newdev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - card->read.ccwdev = ccwgdev->cdev[0]; - card->write.ccwdev = ccwgdev->cdev[1]; - - recover_state = card->state; - rc = ccw_device_set_online(card->read.ccwdev); - if (rc) - goto out_err; - rc = ccw_device_set_online(card->write.ccwdev); - if (rc) - goto out_werr; - - LCS_DBF_TEXT(3, setup, "lcsnewdv"); - - lcs_setup_card(card); - rc = lcs_detect(card); - if (rc) { - LCS_DBF_TEXT(2, setup, "dtctfail"); - dev_err(&ccwgdev->dev, - "Detecting a network adapter for LCS devices" - " failed with rc=%d (0x%x)\n", rc, rc); - lcs_stopcard(card); - goto out; - } - if (card->dev) { - LCS_DBF_TEXT(2, setup, "samedev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - goto netdev_out; - } - switch (card->lan_type) { - case LCS_FRAME_TYPE_ENET: - card->lan_type_trans = eth_type_trans; - dev = alloc_etherdev(0); - break; - default: - LCS_DBF_TEXT(3, setup, "errinit"); - pr_err(" Initialization failed\n"); - goto out; - } - if (!dev) - goto out; - card->dev = dev; - card->dev->ml_priv = card; - card->dev->netdev_ops = &lcs_netdev_ops; - card->dev->dev_port = card->portno; - eth_hw_addr_set(card->dev, card->mac); -#ifdef CONFIG_IP_MULTICAST - if (!lcs_check_multicast_support(card)) - card->dev->netdev_ops = &lcs_mc_netdev_ops; -#endif -netdev_out: - lcs_set_allowed_threads(card,0xffffffff); - if (recover_state == DEV_STATE_RECOVER) { - lcs_set_multicast_list(card->dev); - card->dev->flags |= IFF_UP; - netif_carrier_on(card->dev); - netif_wake_queue(card->dev); - card->state = DEV_STATE_UP; - } else { - lcs_stopcard(card); - } - - if (lcs_register_netdev(ccwgdev) != 0) - goto out; - - /* Print out supported assists: IPv6 */ - pr_info("LCS device %s %s IPv6 support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? - "with" : "without"); - /* Print out supported assist: Multicast */ - pr_info("LCS device %s %s Multicast support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? - "with" : "without"); - return 0; -out: - - ccw_device_set_offline(card->write.ccwdev); -out_werr: - ccw_device_set_offline(card->read.ccwdev); -out_err: - return -ENODEV; -} - -/* - * lcs_shutdown_device, called when setting the group device offline. 
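Before anything is torn down, the allowed-thread mask is cleared and the code waits for thread_running_mask to drain, so neither the recovery thread nor the multicast worker can race the teardown. The rules enforced by the mask helpers at the top of the file, restated as standalone predicates (a sketch only; the driver evaluates these under mask_lock):

// allowed: which workers may run at all (cleared on shutdown)
// started: start requests not yet picked up by a worker
// running: workers currently executing
static int may_request_start(unsigned long allowed, unsigned long started,
                             unsigned long thread)
{
        // mirrors lcs_set_thread_start_bit(); failure means -EPERM
        return (allowed & thread) && !(started & thread);
}

static int may_actually_run(unsigned long allowed, unsigned long running,
                            unsigned long thread)
{
        // mirrors the test inside __lcs_do_run_thread()
        return (allowed & thread) && !(running & thread);
}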
- */ -static int -__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) -{ - struct lcs_card *card; - enum lcs_dev_states recover_state; - int ret = 0, ret2 = 0, ret3 = 0; - - LCS_DBF_TEXT(3, setup, "shtdndev"); - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return -ENODEV; - if (recovery_mode == 0) { - lcs_set_allowed_threads(card, 0); - if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) - return -ERESTARTSYS; - } - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - recover_state = card->state; - - ret = lcs_stop_device(card->dev); - ret2 = ccw_device_set_offline(card->read.ccwdev); - ret3 = ccw_device_set_offline(card->write.ccwdev); - if (!ret) - ret = (ret2) ? ret2 : ret3; - if (ret) - LCS_DBF_TEXT_(3, setup, "1err:%d", ret); - if (recover_state == DEV_STATE_UP) { - card->state = DEV_STATE_RECOVER; - } - return 0; -} - -static int -lcs_shutdown_device(struct ccwgroup_device *ccwgdev) -{ - return __lcs_shutdown_device(ccwgdev, 0); -} - -/* - * drive lcs recovery after startup and startlan initiated by Lan Gateway - */ -static int -lcs_recovery(void *ptr) -{ - struct lcs_card *card; - struct ccwgroup_device *gdev; - int rc; - - card = (struct lcs_card *) ptr; - - LCS_DBF_TEXT(4, trace, "recover1"); - if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) - return 0; - LCS_DBF_TEXT(4, trace, "recover2"); - gdev = card->gdev; - dev_warn(&gdev->dev, - "A recovery process has been started for the LCS device\n"); - rc = __lcs_shutdown_device(gdev, 1); - rc = lcs_new_device(gdev); - if (!rc) - pr_info("Device %s successfully recovered!\n", - card->dev->name); - else - pr_info("Device %s could not be recovered!\n", - card->dev->name); - lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); - return 0; -} - -/* - * lcs_remove_device, free buffers and card - */ -static void -lcs_remove_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return; - - LCS_DBF_TEXT(3, setup, "remdev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - if (ccwgdev->state == CCWGROUP_ONLINE) { - lcs_shutdown_device(ccwgdev); - } - if (card->dev) - unregister_netdev(card->dev); - lcs_cleanup_card(card); - lcs_free_card(card); - dev_set_drvdata(&ccwgdev->dev, NULL); - put_device(&ccwgdev->dev); -} - -static struct ccw_device_id lcs_ids[] = { - {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel}, - {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, - {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, lcs_ids); - -static struct ccw_driver lcs_ccw_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "lcs", - }, - .ids = lcs_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, - .int_class = IRQIO_LCS, -}; - -/* - * LCS ccwgroup driver registration - */ -static struct ccwgroup_driver lcs_group_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "lcs", - }, - .ccw_driver = &lcs_ccw_driver, - .setup = lcs_probe_device, - .remove = lcs_remove_device, - .set_online = lcs_new_device, - .set_offline = lcs_shutdown_device, -}; - -static ssize_t group_store(struct device_driver *ddrv, const char *buf, - size_t count) -{ - int err; - err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); - return err ? 
err : count; -} -static DRIVER_ATTR_WO(group); - -static struct attribute *lcs_drv_attrs[] = { - &driver_attr_group.attr, - NULL, -}; -static struct attribute_group lcs_drv_attr_group = { - .attrs = lcs_drv_attrs, -}; -static const struct attribute_group *lcs_drv_attr_groups[] = { - &lcs_drv_attr_group, - NULL, -}; - -/* - * LCS Module/Kernel initialization function - */ -static int -__init lcs_init_module(void) -{ - int rc; - - pr_info("Loading %s\n", version); - rc = lcs_register_debug_facility(); - LCS_DBF_TEXT(0, setup, "lcsinit"); - if (rc) - goto out_err; - lcs_root_dev = root_device_register("lcs"); - rc = PTR_ERR_OR_ZERO(lcs_root_dev); - if (rc) - goto register_err; - rc = ccw_driver_register(&lcs_ccw_driver); - if (rc) - goto ccw_err; - lcs_group_driver.driver.groups = lcs_drv_attr_groups; - rc = ccwgroup_driver_register(&lcs_group_driver); - if (rc) - goto ccwgroup_err; - return 0; - -ccwgroup_err: - ccw_driver_unregister(&lcs_ccw_driver); -ccw_err: - root_device_unregister(lcs_root_dev); -register_err: - lcs_unregister_debug_facility(); -out_err: - pr_err("Initializing the lcs device driver failed\n"); - return rc; -} - - -/* - * LCS module cleanup function - */ -static void -__exit lcs_cleanup_module(void) -{ - pr_info("Terminating lcs module.\n"); - LCS_DBF_TEXT(0, trace, "cleanup"); - ccwgroup_driver_unregister(&lcs_group_driver); - ccw_driver_unregister(&lcs_ccw_driver); - root_device_unregister(lcs_root_dev); - lcs_unregister_debug_facility(); -} - -module_init(lcs_init_module); -module_exit(lcs_cleanup_module); - -MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h deleted file mode 100644 index a2699b70b050..000000000000 --- a/drivers/s390/net/lcs.h +++ /dev/null @@ -1,342 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/*lcs.h*/ - -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/workqueue.h> -#include <linux/refcount.h> -#include <asm/ccwdev.h> - -#define LCS_DBF_TEXT(level, name, text) \ - do { \ - debug_text_event(lcs_dbf_##name, level, text); \ - } while (0) - -#define LCS_DBF_HEX(level,name,addr,len) \ -do { \ - debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ -} while (0) - -#define LCS_DBF_TEXT_(level,name,text...) \ - do { \ - if (debug_level_enabled(lcs_dbf_##name, level)) { \ - scnprintf(debug_buffer, sizeof(debug_buffer), text); \ - debug_text_event(lcs_dbf_##name, level, debug_buffer); \ - } \ - } while (0) - -/** - * sysfs related stuff - */ -#define CARD_FROM_DEV(cdev) \ - (struct lcs_card *) dev_get_drvdata( \ - &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); - -/** - * Enum for classifying detected devices. 
- */ -enum lcs_channel_types { - /* Device is not a channel */ - lcs_channel_type_none, - - /* Device is a 2216 channel */ - lcs_channel_type_parallel, - - /* Device is a 2216 channel */ - lcs_channel_type_2216, - - /* Device is a OSA2 card */ - lcs_channel_type_osa2 -}; - -/** - * CCW commands used in this driver - */ -#define LCS_CCW_WRITE 0x01 -#define LCS_CCW_READ 0x02 -#define LCS_CCW_TRANSFER 0x08 - -/** - * LCS device status primitives - */ -#define LCS_CMD_STARTLAN 0x01 -#define LCS_CMD_STOPLAN 0x02 -#define LCS_CMD_LANSTAT 0x04 -#define LCS_CMD_STARTUP 0x07 -#define LCS_CMD_SHUTDOWN 0x08 -#define LCS_CMD_QIPASSIST 0xb2 -#define LCS_CMD_SETIPM 0xb4 -#define LCS_CMD_DELIPM 0xb5 - -#define LCS_INITIATOR_TCPIP 0x00 -#define LCS_INITIATOR_LGW 0x01 -#define LCS_STD_CMD_SIZE 16 -#define LCS_MULTICAST_CMD_SIZE 404 - -/** - * LCS IPASSIST MASKS,only used when multicast is switched on - */ -/* Not supported by LCS */ -#define LCS_IPASS_ARP_PROCESSING 0x0001 -#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002 -#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004 -#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 -#define LCS_IPASS_IP_FILTERING 0x0010 -/* Supported by lcs 3172 */ -#define LCS_IPASS_IPV6_SUPPORT 0x0020 -#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 - -/** - * LCS sense byte definitions - */ -#define LCS_SENSE_BYTE_0 0 -#define LCS_SENSE_BYTE_1 1 -#define LCS_SENSE_BYTE_2 2 -#define LCS_SENSE_BYTE_3 3 -#define LCS_SENSE_INTERFACE_DISCONNECT 0x01 -#define LCS_SENSE_EQUIPMENT_CHECK 0x10 -#define LCS_SENSE_BUS_OUT_CHECK 0x20 -#define LCS_SENSE_INTERVENTION_REQUIRED 0x40 -#define LCS_SENSE_CMD_REJECT 0x80 -#define LCS_SENSE_RESETTING_EVENT 0x80 -#define LCS_SENSE_DEVICE_ONLINE 0x20 - -/** - * LCS packet type definitions - */ -#define LCS_FRAME_TYPE_CONTROL 0 -#define LCS_FRAME_TYPE_ENET 1 -#define LCS_FRAME_TYPE_TR 2 -#define LCS_FRAME_TYPE_FDDI 7 -#define LCS_FRAME_TYPE_AUTO -1 - -/** - * some more definitions,we will sort them later - */ -#define LCS_ILLEGAL_OFFSET 0xffff -#define LCS_IOBUFFERSIZE 0x5000 -#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ -#define LCS_MAC_LENGTH 6 -#define LCS_INVALID_PORT_NO -1 -#define LCS_LANCMD_TIMEOUT_DEFAULT 5 - -/** - * Multicast state - */ -#define LCS_IPM_STATE_SET_REQUIRED 0 -#define LCS_IPM_STATE_DEL_REQUIRED 1 -#define LCS_IPM_STATE_ON_CARD 2 - -/** - * LCS IP Assist declarations - * seems to be only used for multicast - */ -#define LCS_IPASS_ARP_PROCESSING 0x0001 -#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002 -#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004 -#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 -#define LCS_IPASS_IP_FILTERING 0x0010 -#define LCS_IPASS_IPV6_SUPPORT 0x0020 -#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 - -/** - * LCS Buffer states - */ -enum lcs_buffer_states { - LCS_BUF_STATE_EMPTY, /* buffer is empty */ - LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */ - LCS_BUF_STATE_READY, /* buffer is ready for read/write */ - LCS_BUF_STATE_PROCESSED, -}; - -/** - * LCS Channel State Machine declarations - */ -enum lcs_channel_states { - LCS_CH_STATE_INIT, - LCS_CH_STATE_HALTED, - LCS_CH_STATE_STOPPED, - LCS_CH_STATE_RUNNING, - LCS_CH_STATE_SUSPENDED, - LCS_CH_STATE_CLEARED, - LCS_CH_STATE_ERROR, -}; - -/** - * LCS device state machine - */ -enum lcs_dev_states { - DEV_STATE_DOWN, - DEV_STATE_UP, - DEV_STATE_RECOVER, -}; - -enum lcs_threads { - LCS_SET_MC_THREAD = 1, - LCS_RECOVERY_THREAD = 2, -}; - -/** - * LCS struct declarations - */ -struct lcs_header { - __u16 offset; - __u8 type; - __u8 slot; -} __attribute__ ((packed)); - -struct 
lcs_ip_mac_pair { - __be32 ip_addr; - __u8 mac_addr[LCS_MAC_LENGTH]; - __u8 reserved[2]; -} __attribute__ ((packed)); - -struct lcs_ipm_list { - struct list_head list; - struct lcs_ip_mac_pair ipm; - __u8 ipm_state; -}; - -struct lcs_cmd { - __u16 offset; - __u8 type; - __u8 slot; - __u8 cmd_code; - __u8 initiator; - __u16 sequence_no; - __u16 return_code; - union { - struct { - __u8 lan_type; - __u8 portno; - __u16 parameter_count; - __u8 operator_flags[3]; - __u8 reserved[3]; - } lcs_std_cmd; - struct { - __u16 unused1; - __u16 buff_size; - __u8 unused2[6]; - } lcs_startup; - struct { - __u8 lan_type; - __u8 portno; - __u8 unused[10]; - __u8 mac_addr[LCS_MAC_LENGTH]; - __u32 num_packets_deblocked; - __u32 num_packets_blocked; - __u32 num_packets_tx_on_lan; - __u32 num_tx_errors_detected; - __u32 num_tx_packets_disgarded; - __u32 num_packets_rx_from_lan; - __u32 num_rx_errors_detected; - __u32 num_rx_discarded_nobuffs_avail; - __u32 num_rx_packets_too_large; - } lcs_lanstat_cmd; -#ifdef CONFIG_IP_MULTICAST - struct { - __u8 lan_type; - __u8 portno; - __u16 num_ip_pairs; - __u16 ip_assists_supported; - __u16 ip_assists_enabled; - __u16 version; - struct { - struct lcs_ip_mac_pair - ip_mac_pair[32]; - __u32 response_data; - } lcs_ipass_ctlmsg __attribute ((packed)); - } lcs_qipassist __attribute__ ((packed)); -#endif /*CONFIG_IP_MULTICAST */ - } cmd __attribute__ ((packed)); -} __attribute__ ((packed)); - -/** - * Forward declarations. - */ -struct lcs_card; -struct lcs_channel; - -/** - * Definition of an lcs buffer. - */ -struct lcs_buffer { - enum lcs_buffer_states state; - void *data; - int count; - /* Callback for completion notification. */ - void (*callback)(struct lcs_channel *, struct lcs_buffer *); -}; - -struct lcs_reply { - struct list_head list; - __u16 sequence_no; - refcount_t refcnt; - /* Callback for completion notification. 
*/ - void (*callback)(struct lcs_card *, struct lcs_cmd *); - wait_queue_head_t wait_q; - struct lcs_card *card; - struct timer_list timer; - int received; - int rc; -}; - -/** - * Definition of an lcs channel - */ -struct lcs_channel { - enum lcs_channel_states state; - struct ccw_device *ccwdev; - struct ccw1 ccws[LCS_NUM_BUFFS + 1]; - wait_queue_head_t wait_q; - struct tasklet_struct irq_tasklet; - struct lcs_buffer iob[LCS_NUM_BUFFS]; - int io_idx; - int buf_idx; -}; - - -/** - * definition of the lcs card - */ -struct lcs_card { - spinlock_t lock; - spinlock_t ipm_lock; - enum lcs_dev_states state; - struct net_device *dev; - struct net_device_stats stats; - __be16 (*lan_type_trans)(struct sk_buff *skb, - struct net_device *dev); - struct ccwgroup_device *gdev; - struct lcs_channel read; - struct lcs_channel write; - struct lcs_buffer *tx_buffer; - int tx_emitted; - struct list_head lancmd_waiters; - int lancmd_timeout; - - struct work_struct kernel_thread_starter; - spinlock_t mask_lock; - unsigned long thread_start_mask; - unsigned long thread_running_mask; - unsigned long thread_allowed_mask; - wait_queue_head_t wait_q; - -#ifdef CONFIG_IP_MULTICAST - struct list_head ipm_list; -#endif - __u8 mac[LCS_MAC_LENGTH]; - __u16 ip_assists_supported; - __u16 ip_assists_enabled; - __s8 lan_type; - __u32 pkt_seq; - __u16 sequence_no; - __s16 portno; - /* Some info copied from probeinfo */ - u8 device_forced; - u8 max_port_no; - u8 hint_port_no; - s16 port_protocol_no; -} __attribute__ ((aligned(8))); - diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c deleted file mode 100644 index 8852b03f943b..000000000000 --- a/drivers/s390/net/netiucv.c +++ /dev/null @@ -1,2095 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * IUCV network driver - * - * Copyright IBM Corp. 2001, 2009 - * - * Author(s): - * Original netiucv driver: - * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) - * Sysfs integration and all bugs therein: - * Cornelia Huck (cornelia.huck@de.ibm.com) - * PM functions: - * Ursula Braun (ursula.braun@de.ibm.com) - * - * Documentation used: - * the source of the original IUCV driver by: - * Stefan Hegewald <hegewald@de.ibm.com> - * Hartmut Penner <hpenner@de.ibm.com> - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 
2000 - */ - -#define KMSG_COMPONENT "netiucv" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#undef DEBUG - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/timer.h> -#include <linux/bitops.h> - -#include <linux/signal.h> -#include <linux/string.h> -#include <linux/device.h> - -#include <linux/ip.h> -#include <linux/if_arp.h> -#include <linux/tcp.h> -#include <linux/skbuff.h> -#include <linux/ctype.h> -#include <net/dst.h> - -#include <linux/io.h> -#include <linux/uaccess.h> -#include <asm/ebcdic.h> - -#include <net/iucv/iucv.h> -#include "fsm.h" - -MODULE_AUTHOR - ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); -MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); - -/* - * Debug Facility stuff - */ -#define IUCV_DBF_SETUP_NAME "iucv_setup" -#define IUCV_DBF_SETUP_LEN 64 -#define IUCV_DBF_SETUP_PAGES 2 -#define IUCV_DBF_SETUP_NR_AREAS 1 -#define IUCV_DBF_SETUP_LEVEL 3 - -#define IUCV_DBF_DATA_NAME "iucv_data" -#define IUCV_DBF_DATA_LEN 128 -#define IUCV_DBF_DATA_PAGES 2 -#define IUCV_DBF_DATA_NR_AREAS 1 -#define IUCV_DBF_DATA_LEVEL 2 - -#define IUCV_DBF_TRACE_NAME "iucv_trace" -#define IUCV_DBF_TRACE_LEN 16 -#define IUCV_DBF_TRACE_PAGES 4 -#define IUCV_DBF_TRACE_NR_AREAS 1 -#define IUCV_DBF_TRACE_LEVEL 3 - -#define IUCV_DBF_TEXT(name,level,text) \ - do { \ - debug_text_event(iucv_dbf_##name,level,text); \ - } while (0) - -#define IUCV_DBF_HEX(name,level,addr,len) \ - do { \ - debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ - } while (0) - -DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); - -#define IUCV_DBF_TEXT_(name, level, text...) \ - do { \ - if (debug_level_enabled(iucv_dbf_##name, level)) { \ - char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ - sprintf(__buf, text); \ - debug_text_event(iucv_dbf_##name, level, __buf); \ - put_cpu_var(iucv_dbf_txt_buf); \ - } \ - } while (0) - -#define IUCV_DBF_SPRINTF(name,level,text...) \ - do { \ - debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ - debug_sprintf_event(iucv_dbf_trace, level, text ); \ - } while (0) - -/* - * some more debug stuff - */ -#define PRINTK_HEADER " iucv: " /* for debugging */ - -static struct device_driver netiucv_driver = { - .owner = THIS_MODULE, - .name = "netiucv", - .bus = &iucv_bus, -}; - -/* - * Per connection profiling data - */ -struct connection_profile { - unsigned long maxmulti; - unsigned long maxcqueue; - unsigned long doios_single; - unsigned long doios_multi; - unsigned long txlen; - unsigned long tx_time; - unsigned long send_stamp; - unsigned long tx_pending; - unsigned long tx_max_pending; -}; - -/* - * Representation of one iucv connection - */ -struct iucv_connection { - struct list_head list; - struct iucv_path *path; - struct sk_buff *rx_buff; - struct sk_buff *tx_buff; - struct sk_buff_head collect_queue; - struct sk_buff_head commit_queue; - spinlock_t collect_lock; - int collect_len; - int max_buffsize; - fsm_timer timer; - fsm_instance *fsm; - struct net_device *netdev; - struct connection_profile prof; - char userid[9]; - char userdata[17]; -}; - -/* - * Linked list of all connection structs. - */ -static LIST_HEAD(iucv_connection_list); -static DEFINE_RWLOCK(iucv_connection_rwlock); - -/* - * Representation of event-data for the - * connection state machine. 
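netiucv drives the interface and each IUCV connection through small table-driven state machines (see the fsm.h include above): a (state, event) pair selects a handler, and the *_names arrays below exist only for readable debug logging. A minimal standalone model of such an FSM; the states, events and handlers here are illustrative, not netiucv's own:

enum state { ST_STOPPED, ST_RUNNING, NR_STATES };
enum event { EV_START, EV_STOP, NR_EVENTS };

typedef void (*fsm_fn)(enum state *st, void *arg);

static void go_up(enum state *st, void *arg)   { (void)arg; *st = ST_RUNNING; }
static void go_down(enum state *st, void *arg) { (void)arg; *st = ST_STOPPED; }

// One handler per (state, event) pair; NULL means the event is
// silently ignored in that state.
static fsm_fn table[NR_STATES][NR_EVENTS] = {
        [ST_STOPPED][EV_START] = go_up,
        [ST_RUNNING][EV_STOP]  = go_down,
};

static void fsm_event(enum state *st, enum event ev, void *arg)
{
        fsm_fn fn = table[*st][ev];

        if (fn)
                fn(st, arg);
}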
- */ -struct iucv_event { - struct iucv_connection *conn; - void *data; -}; - -/* - * Private part of the network device structure - */ -struct netiucv_priv { - struct net_device_stats stats; - unsigned long tbusy; - fsm_instance *fsm; - struct iucv_connection *conn; - struct device *dev; -}; - -/* - * Link level header for a packet. - */ -struct ll_header { - u16 next; -}; - -#define NETIUCV_HDRLEN (sizeof(struct ll_header)) -#define NETIUCV_BUFSIZE_MAX 65537 -#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX -#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) -#define NETIUCV_MTU_DEFAULT 9216 -#define NETIUCV_QUEUELEN_DEFAULT 50 -#define NETIUCV_TIMEOUT_5SEC 5000 - -/* - * Compatibility macros for busy handling - * of network devices. - */ -static void netiucv_clear_busy(struct net_device *dev) -{ - struct netiucv_priv *priv = netdev_priv(dev); - clear_bit(0, &priv->tbusy); - netif_wake_queue(dev); -} - -static int netiucv_test_and_set_busy(struct net_device *dev) -{ - struct netiucv_priv *priv = netdev_priv(dev); - netif_stop_queue(dev); - return test_and_set_bit(0, &priv->tbusy); -} - -static u8 iucvMagic_ascii[16] = { - 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 -}; - -static u8 iucvMagic_ebcdic[16] = { - 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, - 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 -}; - -/* - * Convert an iucv userId to its printable - * form (strip whitespace at end). - * - * @param An iucv userId - * - * @returns The printable string (static data!!) - */ -static char *netiucv_printname(char *name, int len) -{ - static char tmp[17]; - char *p = tmp; - memcpy(tmp, name, len); - tmp[len] = '\0'; - while (*p && ((p - tmp) < len) && (!isspace(*p))) - p++; - *p = '\0'; - return tmp; -} - -static char *netiucv_printuser(struct iucv_connection *conn) -{ - static char tmp_uid[9]; - static char tmp_udat[17]; - static char buf[100]; - - if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { - tmp_uid[8] = '\0'; - tmp_udat[16] = '\0'; - memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8); - memcpy(tmp_udat, conn->userdata, 16); - EBCASC(tmp_udat, 16); - memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); - sprintf(buf, "%s.%s", tmp_uid, tmp_udat); - return buf; - } else - return netiucv_printname(conn->userid, 8); -} - -/* - * States of the interface statemachine. - */ -enum dev_states { - DEV_STATE_STOPPED, - DEV_STATE_STARTWAIT, - DEV_STATE_STOPWAIT, - DEV_STATE_RUNNING, - /* - * MUST be always the last element!! - */ - NR_DEV_STATES -}; - -static const char *dev_state_names[] = { - "Stopped", - "StartWait", - "StopWait", - "Running", -}; - -/* - * Events of the interface statemachine. - */ -enum dev_events { - DEV_EVENT_START, - DEV_EVENT_STOP, - DEV_EVENT_CONUP, - DEV_EVENT_CONDOWN, - /* - * MUST be always the last element!! - */ - NR_DEV_EVENTS -}; - -static const char *dev_event_names[] = { - "Start", - "Stop", - "Connection up", - "Connection down", -}; - -/* - * Events of the connection statemachine - */ -enum conn_events { - /* - * Events, representing callbacks from - * lowlevel iucv layer) - */ - CONN_EVENT_CONN_REQ, - CONN_EVENT_CONN_ACK, - CONN_EVENT_CONN_REJ, - CONN_EVENT_CONN_SUS, - CONN_EVENT_CONN_RES, - CONN_EVENT_RX, - CONN_EVENT_TXDONE, - - /* - * Events, representing errors return codes from - * calls to lowlevel iucv layer - */ - - /* - * Event, representing timer expiry. - */ - CONN_EVENT_TIMER, - - /* - * Events, representing commands from upper levels. 
- */ - CONN_EVENT_START, - CONN_EVENT_STOP, - - /* - * MUST be always the last element!! - */ - NR_CONN_EVENTS, -}; - -static const char *conn_event_names[] = { - "Remote connection request", - "Remote connection acknowledge", - "Remote connection reject", - "Connection suspended", - "Connection resumed", - "Data received", - "Data sent", - - "Timer", - - "Start", - "Stop", -}; - -/* - * States of the connection statemachine. - */ -enum conn_states { - /* - * Connection not assigned to any device, - * initial state, invalid - */ - CONN_STATE_INVALID, - - /* - * Userid assigned but not operating - */ - CONN_STATE_STOPPED, - - /* - * Connection registered, - * no connection request sent yet, - * no connection request received - */ - CONN_STATE_STARTWAIT, - - /* - * Connection registered and connection request sent, - * no acknowledge and no connection request received yet. - */ - CONN_STATE_SETUPWAIT, - - /* - * Connection up and running idle - */ - CONN_STATE_IDLE, - - /* - * Data sent, awaiting CONN_EVENT_TXDONE - */ - CONN_STATE_TX, - - /* - * Error during registration. - */ - CONN_STATE_REGERR, - - /* - * Error during registration. - */ - CONN_STATE_CONNERR, - - /* - * MUST be always the last element!! - */ - NR_CONN_STATES, -}; - -static const char *conn_state_names[] = { - "Invalid", - "Stopped", - "StartWait", - "SetupWait", - "Idle", - "TX", - "Terminating", - "Registration error", - "Connect error", -}; - - -/* - * Debug Facility Stuff - */ -static debug_info_t *iucv_dbf_setup = NULL; -static debug_info_t *iucv_dbf_data = NULL; -static debug_info_t *iucv_dbf_trace = NULL; - -DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); - -static void iucv_unregister_dbf_views(void) -{ - debug_unregister(iucv_dbf_setup); - debug_unregister(iucv_dbf_data); - debug_unregister(iucv_dbf_trace); -} -static int iucv_register_dbf_views(void) -{ - iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, - IUCV_DBF_SETUP_PAGES, - IUCV_DBF_SETUP_NR_AREAS, - IUCV_DBF_SETUP_LEN); - iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME, - IUCV_DBF_DATA_PAGES, - IUCV_DBF_DATA_NR_AREAS, - IUCV_DBF_DATA_LEN); - iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME, - IUCV_DBF_TRACE_PAGES, - IUCV_DBF_TRACE_NR_AREAS, - IUCV_DBF_TRACE_LEN); - - if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) || - (iucv_dbf_trace == NULL)) { - iucv_unregister_dbf_views(); - return -ENOMEM; - } - debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view); - debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL); - - debug_register_view(iucv_dbf_data, &debug_hex_ascii_view); - debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL); - - debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view); - debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL); - - return 0; -} - -/* - * Callback-wrappers, called from lowlevel iucv layer. 
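/*
 * Illustrative sketch, not from the driver: iucv_register_dbf_views()
 * and iucv_unregister_dbf_views() above follow the usual s390 debug
 * facility pattern.  The area name, sizes and level below are
 * assumptions for illustration.
 */
#include <asm/debug.h>

static debug_info_t *mydrv_dbf_trace;	/* hypothetical driver-private area */

static int mydrv_register_dbf(void)
{
	/* name, pages, number of areas, size of one debug entry */
	mydrv_dbf_trace = debug_register("mydrv_trace", 4, 1, 16);
	if (!mydrv_dbf_trace)
		return -ENOMEM;
	debug_register_view(mydrv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(mydrv_dbf_trace, 3);
	return 0;
}

static void mydrv_unregister_dbf(void)
{
	debug_unregister(mydrv_dbf_trace);
}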
- */ - -static void netiucv_callback_rx(struct iucv_path *path, - struct iucv_message *msg) -{ - struct iucv_connection *conn = path->private; - struct iucv_event ev; - - ev.conn = conn; - ev.data = msg; - fsm_event(conn->fsm, CONN_EVENT_RX, &ev); -} - -static void netiucv_callback_txdone(struct iucv_path *path, - struct iucv_message *msg) -{ - struct iucv_connection *conn = path->private; - struct iucv_event ev; - - ev.conn = conn; - ev.data = msg; - fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); -} - -static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) -{ - struct iucv_connection *conn = path->private; - - fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); -} - -static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid, - u8 *ipuser) -{ - struct iucv_connection *conn = path->private; - struct iucv_event ev; - static char tmp_user[9]; - static char tmp_udat[17]; - int rc; - - rc = -EINVAL; - memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); - memcpy(tmp_udat, ipuser, 16); - EBCASC(tmp_udat, 16); - read_lock_bh(&iucv_connection_rwlock); - list_for_each_entry(conn, &iucv_connection_list, list) { - if (strncmp(ipvmid, conn->userid, 8) || - strncmp(ipuser, conn->userdata, 16)) - continue; - /* Found a matching connection for this path. */ - conn->path = path; - ev.conn = conn; - ev.data = path; - fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); - rc = 0; - } - IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", - tmp_user, netiucv_printname(tmp_udat, 16)); - read_unlock_bh(&iucv_connection_rwlock); - return rc; -} - -static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser) -{ - struct iucv_connection *conn = path->private; - - fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); -} - -static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser) -{ - struct iucv_connection *conn = path->private; - - fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); -} - -static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser) -{ - struct iucv_connection *conn = path->private; - - fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); -} - -/* - * NOP action for statemachines - */ -static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) -{ -} - -/* - * Actions of the connection statemachine - */ - -/* - * netiucv_unpack_skb - * @conn: The connection where this skb has been received. - * @pskb: The received skb. - * - * Unpack a just received skb and hand it over to upper layers. - * Helper function for conn_action_rx. 
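/*
 * Illustrative helpers (the sketch_* names are hypothetical) mirroring
 * the framing that conn_action_txdone() below builds and
 * netiucv_unpack_skb() walks: each packet is preceded by a struct
 * ll_header whose 16-bit "next" field holds the offset of the
 * following header from the start of the buffer, and a final header
 * with next == 0 terminates the buffer.
 */
static void sketch_append_packet(struct sk_buff *txbuf,
				 const void *data, u16 len)
{
	struct ll_header hdr;

	/* offset of the *next* header, measured from the buffer start */
	hdr.next = txbuf->len + len + NETIUCV_HDRLEN;
	skb_put_data(txbuf, &hdr, NETIUCV_HDRLEN);
	skb_put_data(txbuf, data, len);
}

static void sketch_terminate_buffer(struct sk_buff *txbuf)
{
	struct ll_header hdr = { .next = 0 };

	skb_put_data(txbuf, &hdr, NETIUCV_HDRLEN);
}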
- */ -static void netiucv_unpack_skb(struct iucv_connection *conn, - struct sk_buff *pskb) -{ - struct net_device *dev = conn->netdev; - struct netiucv_priv *privptr = netdev_priv(dev); - u16 offset = 0; - - skb_put(pskb, NETIUCV_HDRLEN); - pskb->dev = dev; - pskb->ip_summed = CHECKSUM_NONE; - pskb->protocol = cpu_to_be16(ETH_P_IP); - - while (1) { - struct sk_buff *skb; - struct ll_header *header = (struct ll_header *) pskb->data; - - if (!header->next) - break; - - skb_pull(pskb, NETIUCV_HDRLEN); - header->next -= offset; - offset += header->next; - header->next -= NETIUCV_HDRLEN; - if (skb_tailroom(pskb) < header->next) { - IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", - header->next, skb_tailroom(pskb)); - return; - } - skb_put(pskb, header->next); - skb_reset_mac_header(pskb); - skb = dev_alloc_skb(pskb->len); - if (!skb) { - IUCV_DBF_TEXT(data, 2, - "Out of memory in netiucv_unpack_skb\n"); - privptr->stats.rx_dropped++; - return; - } - skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), - pskb->len); - skb_reset_mac_header(skb); - skb->dev = pskb->dev; - skb->protocol = pskb->protocol; - pskb->ip_summed = CHECKSUM_UNNECESSARY; - privptr->stats.rx_packets++; - privptr->stats.rx_bytes += skb->len; - netif_rx(skb); - skb_pull(pskb, header->next); - skb_put(pskb, NETIUCV_HDRLEN); - } -} - -static void conn_action_rx(fsm_instance *fi, int event, void *arg) -{ - struct iucv_event *ev = arg; - struct iucv_connection *conn = ev->conn; - struct iucv_message *msg = ev->data; - struct netiucv_priv *privptr = netdev_priv(conn->netdev); - int rc; - - IUCV_DBF_TEXT(trace, 4, __func__); - - if (!conn->netdev) { - iucv_message_reject(conn->path, msg); - IUCV_DBF_TEXT(data, 2, - "Received data for unlinked connection\n"); - return; - } - if (msg->length > conn->max_buffsize) { - iucv_message_reject(conn->path, msg); - privptr->stats.rx_dropped++; - IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", - msg->length, conn->max_buffsize); - return; - } - conn->rx_buff->data = conn->rx_buff->head; - skb_reset_tail_pointer(conn->rx_buff); - conn->rx_buff->len = 0; - rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, - msg->length, NULL); - if (rc || msg->length < 5) { - privptr->stats.rx_errors++; - IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); - return; - } - netiucv_unpack_skb(conn, conn->rx_buff); -} - -static void conn_action_txdone(fsm_instance *fi, int event, void *arg) -{ - struct iucv_event *ev = arg; - struct iucv_connection *conn = ev->conn; - struct iucv_message *msg = ev->data; - struct iucv_message txmsg; - struct netiucv_priv *privptr = NULL; - u32 single_flag = msg->tag; - u32 txbytes = 0; - u32 txpackets = 0; - u32 stat_maxcq = 0; - struct sk_buff *skb; - unsigned long saveflags; - struct ll_header header; - int rc; - - IUCV_DBF_TEXT(trace, 4, __func__); - - if (!conn || !conn->netdev) { - IUCV_DBF_TEXT(data, 2, - "Send confirmation for unlinked connection\n"); - return; - } - privptr = netdev_priv(conn->netdev); - conn->prof.tx_pending--; - if (single_flag) { - if ((skb = skb_dequeue(&conn->commit_queue))) { - refcount_dec(&skb->users); - if (privptr) { - privptr->stats.tx_packets++; - privptr->stats.tx_bytes += - (skb->len - NETIUCV_HDRLEN - - NETIUCV_HDRLEN); - } - dev_kfree_skb_any(skb); - } - } - conn->tx_buff->data = conn->tx_buff->head; - skb_reset_tail_pointer(conn->tx_buff); - conn->tx_buff->len = 0; - spin_lock_irqsave(&conn->collect_lock, saveflags); - while ((skb = skb_dequeue(&conn->collect_queue))) { - header.next = 
conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; - skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); - skb_copy_from_linear_data(skb, - skb_put(conn->tx_buff, skb->len), - skb->len); - txbytes += skb->len; - txpackets++; - stat_maxcq++; - refcount_dec(&skb->users); - dev_kfree_skb_any(skb); - } - if (conn->collect_len > conn->prof.maxmulti) - conn->prof.maxmulti = conn->collect_len; - conn->collect_len = 0; - spin_unlock_irqrestore(&conn->collect_lock, saveflags); - if (conn->tx_buff->len == 0) { - fsm_newstate(fi, CONN_STATE_IDLE); - return; - } - - header.next = 0; - skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); - conn->prof.send_stamp = jiffies; - txmsg.class = 0; - txmsg.tag = 0; - rc = iucv_message_send(conn->path, &txmsg, 0, 0, - conn->tx_buff->data, conn->tx_buff->len); - conn->prof.doios_multi++; - conn->prof.txlen += conn->tx_buff->len; - conn->prof.tx_pending++; - if (conn->prof.tx_pending > conn->prof.tx_max_pending) - conn->prof.tx_max_pending = conn->prof.tx_pending; - if (rc) { - conn->prof.tx_pending--; - fsm_newstate(fi, CONN_STATE_IDLE); - if (privptr) - privptr->stats.tx_errors += txpackets; - IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); - } else { - if (privptr) { - privptr->stats.tx_packets += txpackets; - privptr->stats.tx_bytes += txbytes; - } - if (stat_maxcq > conn->prof.maxcqueue) - conn->prof.maxcqueue = stat_maxcq; - } -} - -static struct iucv_handler netiucv_handler = { - .path_pending = netiucv_callback_connreq, - .path_complete = netiucv_callback_connack, - .path_severed = netiucv_callback_connrej, - .path_quiesced = netiucv_callback_connsusp, - .path_resumed = netiucv_callback_connres, - .message_pending = netiucv_callback_rx, - .message_complete = netiucv_callback_txdone, -}; - -static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) -{ - struct iucv_event *ev = arg; - struct iucv_connection *conn = ev->conn; - struct iucv_path *path = ev->data; - struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = netdev_priv(netdev); - int rc; - - IUCV_DBF_TEXT(trace, 3, __func__); - - conn->path = path; - path->msglim = NETIUCV_QUEUELEN_DEFAULT; - path->flags = 0; - rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); - if (rc) { - IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); - return; - } - fsm_newstate(fi, CONN_STATE_IDLE); - netdev->tx_queue_len = conn->path->msglim; - fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); -} - -static void conn_action_connreject(fsm_instance *fi, int event, void *arg) -{ - struct iucv_event *ev = arg; - struct iucv_path *path = ev->data; - - IUCV_DBF_TEXT(trace, 3, __func__); - iucv_path_sever(path, NULL); -} - -static void conn_action_connack(fsm_instance *fi, int event, void *arg) -{ - struct iucv_connection *conn = arg; - struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = netdev_priv(netdev); - - IUCV_DBF_TEXT(trace, 3, __func__); - fsm_deltimer(&conn->timer); - fsm_newstate(fi, CONN_STATE_IDLE); - netdev->tx_queue_len = conn->path->msglim; - fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); -} - -static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) -{ - struct iucv_connection *conn = arg; - - IUCV_DBF_TEXT(trace, 3, __func__); - fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, conn->userdata); - fsm_newstate(fi, CONN_STATE_STARTWAIT); -} - -static void conn_action_connsever(fsm_instance *fi, int event, void *arg) -{ - struct iucv_connection *conn = arg; - struct net_device *netdev = 
conn->netdev; - struct netiucv_priv *privptr = netdev_priv(netdev); - - IUCV_DBF_TEXT(trace, 3, __func__); - - fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, conn->userdata); - dev_info(privptr->dev, "The peer z/VM guest %s has closed the " - "connection\n", netiucv_printuser(conn)); - IUCV_DBF_TEXT(data, 2, - "conn_action_connsever: Remote dropped connection\n"); - fsm_newstate(fi, CONN_STATE_STARTWAIT); - fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); -} - -static void conn_action_start(fsm_instance *fi, int event, void *arg) -{ - struct iucv_connection *conn = arg; - struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = netdev_priv(netdev); - int rc; - - IUCV_DBF_TEXT(trace, 3, __func__); - - fsm_newstate(fi, CONN_STATE_STARTWAIT); - - /* - * We must set the state before calling iucv_connect because the - * callback handler could be called at any point after the connection - * request is sent - */ - - fsm_newstate(fi, CONN_STATE_SETUPWAIT); - conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); - IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", - netdev->name, netiucv_printuser(conn)); - - rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, - NULL, conn->userdata, conn); - switch (rc) { - case 0: - netdev->tx_queue_len = conn->path->msglim; - fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, - CONN_EVENT_TIMER, conn); - return; - case 11: - dev_warn(privptr->dev, - "The IUCV device failed to connect to z/VM guest %s\n", - netiucv_printname(conn->userid, 8)); - fsm_newstate(fi, CONN_STATE_STARTWAIT); - break; - case 12: - dev_warn(privptr->dev, - "The IUCV device failed to connect to the peer on z/VM" - " guest %s\n", netiucv_printname(conn->userid, 8)); - fsm_newstate(fi, CONN_STATE_STARTWAIT); - break; - case 13: - dev_err(privptr->dev, - "Connecting the IUCV device would exceed the maximum" - " number of IUCV connections\n"); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - case 14: - dev_err(privptr->dev, - "z/VM guest %s has too many IUCV connections" - " to connect with the IUCV device\n", - netiucv_printname(conn->userid, 8)); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - case 15: - dev_err(privptr->dev, - "The IUCV device cannot connect to a z/VM guest with no" - " IUCV authorization\n"); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - default: - dev_err(privptr->dev, - "Connecting the IUCV device failed with error %d\n", - rc); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - } - IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); - kfree(conn->path); - conn->path = NULL; -} - -static void netiucv_purge_skb_queue(struct sk_buff_head *q) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(q))) { - refcount_dec(&skb->users); - dev_kfree_skb_any(skb); - } -} - -static void conn_action_stop(fsm_instance *fi, int event, void *arg) -{ - struct iucv_event *ev = arg; - struct iucv_connection *conn = ev->conn; - struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = netdev_priv(netdev); - - IUCV_DBF_TEXT(trace, 3, __func__); - - fsm_deltimer(&conn->timer); - fsm_newstate(fi, CONN_STATE_STOPPED); - netiucv_purge_skb_queue(&conn->collect_queue); - if (conn->path) { - IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); - iucv_path_sever(conn->path, conn->userdata); - kfree(conn->path); - conn->path = NULL; - } - netiucv_purge_skb_queue(&conn->commit_queue); - fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); -} - -static void conn_action_inval(fsm_instance *fi, int event, 
void *arg) -{ - struct iucv_connection *conn = arg; - struct net_device *netdev = conn->netdev; - - IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", - netdev->name, conn->userid); -} - -static const fsm_node conn_fsm[] = { - { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval }, - { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start }, - - { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop }, - { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop }, - - { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject }, - { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, - { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, - { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject }, - { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject }, - - { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack }, - { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev }, - - { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever }, - { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever }, - { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever }, - - { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx }, - { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx }, - - { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone }, - { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone }, -}; - -static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); - - -/* - * Actions for interface - statemachine. - */ - -/* - * dev_action_start - * @fi: An instance of an interface statemachine. - * @event: The event, just happened. - * @arg: Generic pointer, casted from struct net_device * upon call. - * - * Startup connection by sending CONN_EVENT_START to it. - */ -static void dev_action_start(fsm_instance *fi, int event, void *arg) -{ - struct net_device *dev = arg; - struct netiucv_priv *privptr = netdev_priv(dev); - - IUCV_DBF_TEXT(trace, 3, __func__); - - fsm_newstate(fi, DEV_STATE_STARTWAIT); - fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); -} - -/* - * Shutdown connection by sending CONN_EVENT_STOP to it. - * - * @param fi An instance of an interface statemachine. - * @param event The event, just happened. - * @param arg Generic pointer, casted from struct net_device * upon call. - */ -static void -dev_action_stop(fsm_instance *fi, int event, void *arg) -{ - struct net_device *dev = arg; - struct netiucv_priv *privptr = netdev_priv(dev); - struct iucv_event ev; - - IUCV_DBF_TEXT(trace, 3, __func__); - - ev.conn = privptr->conn; - - fsm_newstate(fi, DEV_STATE_STOPWAIT); - fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); -} - -/* - * Called from connection statemachine - * when a connection is up and running. - * - * @param fi An instance of an interface statemachine. - * @param event The event, just happened. - * @param arg Generic pointer, casted from struct net_device * upon call. 
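/*
 * Conceptual sketch of how an fsm_node table such as conn_fsm[] above
 * is dispatched: fsm_event() resolves (current state, event) to an
 * action callback.  The real helper in this directory's fsm.c builds a
 * state x event jump matrix at init_fsm() time; the linear scan below
 * is purely illustrative.
 */
typedef void (*sketch_action_t)(void *fi, int event, void *arg);

struct sketch_node {
	int cond_state;
	int cond_event;
	sketch_action_t function;
};

static void sketch_fsm_event(const struct sketch_node *tab, int len,
			     int state, int event, void *fi, void *arg)
{
	int i;

	for (i = 0; i < len; i++) {
		if (tab[i].cond_state == state &&
		    tab[i].cond_event == event) {
			tab[i].function(fi, event, arg);
			return;
		}
	}
	/* no matching node: the event is ignored in this state */
}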
- */ -static void -dev_action_connup(fsm_instance *fi, int event, void *arg) -{ - struct net_device *dev = arg; - struct netiucv_priv *privptr = netdev_priv(dev); - - IUCV_DBF_TEXT(trace, 3, __func__); - - switch (fsm_getstate(fi)) { - case DEV_STATE_STARTWAIT: - fsm_newstate(fi, DEV_STATE_RUNNING); - dev_info(privptr->dev, - "The IUCV device has been connected" - " successfully to %s\n", - netiucv_printuser(privptr->conn)); - IUCV_DBF_TEXT(setup, 3, - "connection is up and running\n"); - break; - case DEV_STATE_STOPWAIT: - IUCV_DBF_TEXT(data, 2, - "dev_action_connup: in DEV_STATE_STOPWAIT\n"); - break; - } -} - -/* - * Called from connection statemachine - * when a connection has been shutdown. - * - * @param fi An instance of an interface statemachine. - * @param event The event, just happened. - * @param arg Generic pointer, casted from struct net_device * upon call. - */ -static void -dev_action_conndown(fsm_instance *fi, int event, void *arg) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - - switch (fsm_getstate(fi)) { - case DEV_STATE_RUNNING: - fsm_newstate(fi, DEV_STATE_STARTWAIT); - break; - case DEV_STATE_STOPWAIT: - fsm_newstate(fi, DEV_STATE_STOPPED); - IUCV_DBF_TEXT(setup, 3, "connection is down\n"); - break; - } -} - -static const fsm_node dev_fsm[] = { - { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, - - { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start }, - { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown }, - - { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup }, - - { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, - { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, - { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop }, -}; - -static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); - -/* - * Transmit a packet. - * This is a helper function for netiucv_tx(). - * - * @param conn Connection to be used for sending. - * @param skb Pointer to struct sk_buff of packet to send. - * The linklevel header has already been set up - * by netiucv_tx(). - * - * @return 0 on success, -ERRNO on failure. (Never fails.) - */ -static int netiucv_transmit_skb(struct iucv_connection *conn, - struct sk_buff *skb) -{ - struct iucv_message msg; - unsigned long saveflags; - struct ll_header header; - int rc; - - if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { - int l = skb->len + NETIUCV_HDRLEN; - - spin_lock_irqsave(&conn->collect_lock, saveflags); - if (conn->collect_len + l > - (conn->max_buffsize - NETIUCV_HDRLEN)) { - rc = -EBUSY; - IUCV_DBF_TEXT(data, 2, - "EBUSY from netiucv_transmit_skb\n"); - } else { - refcount_inc(&skb->users); - skb_queue_tail(&conn->collect_queue, skb); - conn->collect_len += l; - rc = 0; - } - spin_unlock_irqrestore(&conn->collect_lock, saveflags); - } else { - struct sk_buff *nskb = skb; - /* - * Copy the skb to a new allocated skb in lowmem only if the - * data is located above 2G in memory or tailroom is < 2. - */ - unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + - NETIUCV_HDRLEN)) >> 31; - int copied = 0; - if (hi || (skb_tailroom(skb) < 2)) { - nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + - NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); - if (!nskb) { - IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); - rc = -ENOMEM; - return rc; - } else { - skb_reserve(nskb, NETIUCV_HDRLEN); - skb_put_data(nskb, skb->data, skb->len); - } - copied = 1; - } - /* - * skb now is below 2G and has enough room. Add headers. 
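/*
 * Why the bounce copy above is needed (an inference from the
 * surrounding code): IUCV send buffers must be 31-bit addressable,
 * hence GFP_DMA -- which on s390 means below 2 GiB -- and the "hi"
 * test, which flags any skb whose tail would sit at or above the
 * 2 GiB line:
 *
 *   needs_bounce = ((unsigned long)(skb_tail_pointer(skb) +
 *                                   NETIUCV_HDRLEN) >> 31) != 0 ||
 *                  skb_tailroom(skb) < 2;
 */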
- */ - header.next = nskb->len + NETIUCV_HDRLEN; - memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); - header.next = 0; - skb_put_data(nskb, &header, NETIUCV_HDRLEN); - - fsm_newstate(conn->fsm, CONN_STATE_TX); - conn->prof.send_stamp = jiffies; - - msg.tag = 1; - msg.class = 0; - rc = iucv_message_send(conn->path, &msg, 0, 0, - nskb->data, nskb->len); - conn->prof.doios_single++; - conn->prof.txlen += skb->len; - conn->prof.tx_pending++; - if (conn->prof.tx_pending > conn->prof.tx_max_pending) - conn->prof.tx_max_pending = conn->prof.tx_pending; - if (rc) { - struct netiucv_priv *privptr; - fsm_newstate(conn->fsm, CONN_STATE_IDLE); - conn->prof.tx_pending--; - privptr = netdev_priv(conn->netdev); - if (privptr) - privptr->stats.tx_errors++; - if (copied) - dev_kfree_skb(nskb); - else { - /* - * Remove our headers. They get added - * again on retransmit. - */ - skb_pull(skb, NETIUCV_HDRLEN); - skb_trim(skb, skb->len - NETIUCV_HDRLEN); - } - IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); - } else { - if (copied) - dev_kfree_skb(skb); - refcount_inc(&nskb->users); - skb_queue_tail(&conn->commit_queue, nskb); - } - } - - return rc; -} - -/* - * Interface API for upper network layers - */ - -/* - * Open an interface. - * Called from generic network layer when ifconfig up is run. - * - * @param dev Pointer to interface struct. - * - * @return 0 on success, -ERRNO on failure. (Never fails.) - */ -static int netiucv_open(struct net_device *dev) -{ - struct netiucv_priv *priv = netdev_priv(dev); - - fsm_event(priv->fsm, DEV_EVENT_START, dev); - return 0; -} - -/* - * Close an interface. - * Called from generic network layer when ifconfig down is run. - * - * @param dev Pointer to interface struct. - * - * @return 0 on success, -ERRNO on failure. (Never fails.) - */ -static int netiucv_close(struct net_device *dev) -{ - struct netiucv_priv *priv = netdev_priv(dev); - - fsm_event(priv->fsm, DEV_EVENT_STOP, dev); - return 0; -} - -/* - * Start transmission of a packet. - * Called from generic network device layer. - */ -static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct netiucv_priv *privptr = netdev_priv(dev); - int rc; - - IUCV_DBF_TEXT(trace, 4, __func__); - /* - * Some sanity checks ... - */ - if (skb == NULL) { - IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); - privptr->stats.tx_dropped++; - return NETDEV_TX_OK; - } - if (skb_headroom(skb) < NETIUCV_HDRLEN) { - IUCV_DBF_TEXT(data, 2, - "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); - dev_kfree_skb(skb); - privptr->stats.tx_dropped++; - return NETDEV_TX_OK; - } - - /* - * If connection is not running, try to restart it - * and throw away packet. - */ - if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { - dev_kfree_skb(skb); - privptr->stats.tx_dropped++; - privptr->stats.tx_errors++; - privptr->stats.tx_carrier_errors++; - return NETDEV_TX_OK; - } - - if (netiucv_test_and_set_busy(dev)) { - IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); - return NETDEV_TX_BUSY; - } - netif_trans_update(dev); - rc = netiucv_transmit_skb(privptr->conn, skb); - netiucv_clear_busy(dev); - return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; -} - -/* - * netiucv_stats - * @dev: Pointer to interface struct. - * - * Returns interface statistics of a device. - * - * Returns pointer to stats struct of this interface. 
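/*
 * Note on netiucv_tx() above: it follows the netdev_tx_t contract from
 * netdevice.h -- NETDEV_TX_OK means the skb was consumed (sent or
 * deliberately dropped), while NETDEV_TX_BUSY asks the core to requeue
 * and must leave the skb untouched.  That is why every sanity-check
 * failure frees the skb and still returns NETDEV_TX_OK, whereas the
 * busy and transmit-error paths return NETDEV_TX_BUSY.
 */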
- */ -static struct net_device_stats *netiucv_stats (struct net_device * dev) -{ - struct netiucv_priv *priv = netdev_priv(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return &priv->stats; -} - -/* - * attributes in sysfs - */ - -static ssize_t user_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); -} - -static int netiucv_check_user(const char *buf, size_t count, char *username, - char *userdata) -{ - const char *p; - int i; - - p = strchr(buf, '.'); - if ((p && ((count > 26) || - ((p - buf) > 8) || - (buf + count - p > 18))) || - (!p && (count > 9))) { - IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); - return -EINVAL; - } - - for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { - if (isalnum(*p) || *p == '$') { - username[i] = toupper(*p); - continue; - } - if (*p == '\n') - /* trailing lf, grr */ - break; - IUCV_DBF_TEXT_(setup, 2, - "conn_write: invalid character %02x\n", *p); - return -EINVAL; - } - while (i < 8) - username[i++] = ' '; - username[8] = '\0'; - - if (*p == '.') { - p++; - for (i = 0; i < 16 && *p; i++, p++) { - if (*p == '\n') - break; - userdata[i] = toupper(*p); - } - while (i > 0 && i < 16) - userdata[i++] = ' '; - } else - memcpy(userdata, iucvMagic_ascii, 16); - userdata[16] = '\0'; - ASCEBC(userdata, 16); - - return 0; -} - -static ssize_t user_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->conn->netdev; - char username[9]; - char userdata[17]; - int rc; - struct iucv_connection *cp; - - IUCV_DBF_TEXT(trace, 3, __func__); - rc = netiucv_check_user(buf, count, username, userdata); - if (rc) - return rc; - - if (memcmp(username, priv->conn->userid, 9) && - (ndev->flags & (IFF_UP | IFF_RUNNING))) { - /* username changed while the interface is active. 
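/*
 * Illustrative input/output for netiucv_check_user() above (example
 * values are assumptions, not driver test vectors): the sysfs buffer
 * holds "USERID" or "USERID.userdata".  For "lnxguest.data1\n",
 * username becomes "LNXGUEST" (uppercased, blank padded to 8, NUL
 * terminated) and userdata becomes "DATA1" blank padded to 16 and
 * converted to EBCDIC; more than 8 userid or 16 userdata characters
 * yields -EINVAL.
 */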
*/ - IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); - return -EPERM; - } - read_lock_bh(&iucv_connection_rwlock); - list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && - !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { - read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " - "already exists\n", netiucv_printuser(cp)); - return -EEXIST; - } - } - read_unlock_bh(&iucv_connection_rwlock); - memcpy(priv->conn->userid, username, 9); - memcpy(priv->conn->userdata, userdata, 17); - return count; -} - -static DEVICE_ATTR(user, 0644, user_show, user_write); - -static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%d\n", priv->conn->max_buffsize); -} - -static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->conn->netdev; - unsigned int bs1; - int rc; - - IUCV_DBF_TEXT(trace, 3, __func__); - if (count >= 39) - return -EINVAL; - - rc = kstrtouint(buf, 0, &bs1); - - if (rc == -EINVAL) { - IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n", - buf); - return -EINVAL; - } - if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) { - IUCV_DBF_TEXT_(setup, 2, - "buffer_write: buffer size %d too large\n", - bs1); - return -EINVAL; - } - if ((ndev->flags & IFF_RUNNING) && - (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { - IUCV_DBF_TEXT_(setup, 2, - "buffer_write: buffer size %d too small\n", - bs1); - return -EINVAL; - } - if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { - IUCV_DBF_TEXT_(setup, 2, - "buffer_write: buffer size %d too small\n", - bs1); - return -EINVAL; - } - - priv->conn->max_buffsize = bs1; - if (!(ndev->flags & IFF_RUNNING)) - ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN; - - return count; - -} - -static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); - -static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); -} - -static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); - -static ssize_t conn_fsm_show (struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); -} - -static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); - -static ssize_t maxmulti_show (struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); -} - -static ssize_t maxmulti_write (struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.maxmulti = 0; - return count; -} - -static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); - -static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - 
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); -} - -static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.maxcqueue = 0; - return count; -} - -static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); - -static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); -} - -static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.doios_single = 0; - return count; -} - -static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); - -static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); -} - -static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - priv->conn->prof.doios_multi = 0; - return count; -} - -static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); - -static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.txlen); -} - -static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.txlen = 0; - return count; -} - -static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); - -static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); -} - -static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.tx_time = 0; - return count; -} - -static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); - -static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); -} - -static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.tx_pending = 0; - return count; -} - -static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); - -static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%ld\n", 
priv->conn->prof.tx_max_pending); -} - -static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - - IUCV_DBF_TEXT(trace, 4, __func__); - priv->conn->prof.tx_max_pending = 0; - return count; -} - -static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write); - -static struct attribute *netiucv_attrs[] = { - &dev_attr_buffer.attr, - &dev_attr_user.attr, - NULL, -}; - -static struct attribute_group netiucv_attr_group = { - .attrs = netiucv_attrs, -}; - -static struct attribute *netiucv_stat_attrs[] = { - &dev_attr_device_fsm_state.attr, - &dev_attr_connection_fsm_state.attr, - &dev_attr_max_tx_buffer_used.attr, - &dev_attr_max_chained_skbs.attr, - &dev_attr_tx_single_write_ops.attr, - &dev_attr_tx_multi_write_ops.attr, - &dev_attr_netto_bytes.attr, - &dev_attr_max_tx_io_time.attr, - &dev_attr_tx_pending.attr, - &dev_attr_tx_max_pending.attr, - NULL, -}; - -static struct attribute_group netiucv_stat_attr_group = { - .name = "stats", - .attrs = netiucv_stat_attrs, -}; - -static const struct attribute_group *netiucv_attr_groups[] = { - &netiucv_stat_attr_group, - &netiucv_attr_group, - NULL, -}; - -static int netiucv_register_device(struct net_device *ndev) -{ - struct netiucv_priv *priv = netdev_priv(ndev); - struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); - int ret; - - IUCV_DBF_TEXT(trace, 3, __func__); - - if (dev) { - dev_set_name(dev, "net%s", ndev->name); - dev->bus = &iucv_bus; - dev->parent = iucv_root; - dev->groups = netiucv_attr_groups; - /* - * The release function could be called after the - * module has been unloaded. It's _only_ task is to - * free the struct. Therefore, we specify kfree() - * directly here. (Probably a little bit obfuscating - * but legitime ...). - */ - dev->release = (void (*)(struct device *))kfree; - dev->driver = &netiucv_driver; - } else - return -ENOMEM; - - ret = device_register(dev); - if (ret) { - put_device(dev); - return ret; - } - priv->dev = dev; - dev_set_drvdata(dev, priv); - return 0; -} - -static void netiucv_unregister_device(struct device *dev) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - device_unregister(dev); -} - -/* - * Allocate and initialize a new connection structure. 
- * Add it to the list of netiucv connections; - */ -static struct iucv_connection *netiucv_new_connection(struct net_device *dev, - char *username, - char *userdata) -{ - struct iucv_connection *conn; - - conn = kzalloc(sizeof(*conn), GFP_KERNEL); - if (!conn) - goto out; - skb_queue_head_init(&conn->collect_queue); - skb_queue_head_init(&conn->commit_queue); - spin_lock_init(&conn->collect_lock); - conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; - conn->netdev = dev; - - conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); - if (!conn->rx_buff) - goto out_conn; - conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); - if (!conn->tx_buff) - goto out_rx; - conn->fsm = init_fsm("netiucvconn", conn_state_names, - conn_event_names, NR_CONN_STATES, - NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, - GFP_KERNEL); - if (!conn->fsm) - goto out_tx; - - fsm_settimer(conn->fsm, &conn->timer); - fsm_newstate(conn->fsm, CONN_STATE_INVALID); - - if (userdata) - memcpy(conn->userdata, userdata, 17); - if (username) { - memcpy(conn->userid, username, 9); - fsm_newstate(conn->fsm, CONN_STATE_STOPPED); - } - - write_lock_bh(&iucv_connection_rwlock); - list_add_tail(&conn->list, &iucv_connection_list); - write_unlock_bh(&iucv_connection_rwlock); - return conn; - -out_tx: - kfree_skb(conn->tx_buff); -out_rx: - kfree_skb(conn->rx_buff); -out_conn: - kfree(conn); -out: - return NULL; -} - -/* - * Release a connection structure and remove it from the - * list of netiucv connections. - */ -static void netiucv_remove_connection(struct iucv_connection *conn) -{ - - IUCV_DBF_TEXT(trace, 3, __func__); - write_lock_bh(&iucv_connection_rwlock); - list_del_init(&conn->list); - write_unlock_bh(&iucv_connection_rwlock); - fsm_deltimer(&conn->timer); - netiucv_purge_skb_queue(&conn->collect_queue); - if (conn->path) { - iucv_path_sever(conn->path, conn->userdata); - kfree(conn->path); - conn->path = NULL; - } - netiucv_purge_skb_queue(&conn->commit_queue); - kfree_fsm(conn->fsm); - kfree_skb(conn->rx_buff); - kfree_skb(conn->tx_buff); -} - -/* - * Release everything of a net device. - */ -static void netiucv_free_netdevice(struct net_device *dev) -{ - struct netiucv_priv *privptr = netdev_priv(dev); - - IUCV_DBF_TEXT(trace, 3, __func__); - - if (!dev) - return; - - if (privptr) { - if (privptr->conn) - netiucv_remove_connection(privptr->conn); - if (privptr->fsm) - kfree_fsm(privptr->fsm); - privptr->conn = NULL; privptr->fsm = NULL; - /* privptr gets freed by free_netdev() */ - } -} - -/* - * Initialize a net device. (Called from kernel in alloc_netdev()) - */ -static const struct net_device_ops netiucv_netdev_ops = { - .ndo_open = netiucv_open, - .ndo_stop = netiucv_close, - .ndo_get_stats = netiucv_stats, - .ndo_start_xmit = netiucv_tx, -}; - -static void netiucv_setup_netdevice(struct net_device *dev) -{ - dev->mtu = NETIUCV_MTU_DEFAULT; - dev->min_mtu = 576; - dev->max_mtu = NETIUCV_MTU_MAX; - dev->needs_free_netdev = true; - dev->priv_destructor = netiucv_free_netdevice; - dev->hard_header_len = NETIUCV_HDRLEN; - dev->addr_len = 0; - dev->type = ARPHRD_SLIP; - dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->netdev_ops = &netiucv_netdev_ops; -} - -/* - * Allocate and initialize everything of a net device. 
- */ -static struct net_device *netiucv_init_netdevice(char *username, char *userdata) -{ - struct netiucv_priv *privptr; - struct net_device *dev; - - dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", - NET_NAME_UNKNOWN, netiucv_setup_netdevice); - if (!dev) - return NULL; - rtnl_lock(); - if (dev_alloc_name(dev, dev->name) < 0) - goto out_netdev; - - privptr = netdev_priv(dev); - privptr->fsm = init_fsm("netiucvdev", dev_state_names, - dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, - dev_fsm, DEV_FSM_LEN, GFP_KERNEL); - if (!privptr->fsm) - goto out_netdev; - - privptr->conn = netiucv_new_connection(dev, username, userdata); - if (!privptr->conn) { - IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); - goto out_fsm; - } - fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); - return dev; - -out_fsm: - kfree_fsm(privptr->fsm); -out_netdev: - rtnl_unlock(); - free_netdev(dev); - return NULL; -} - -static ssize_t connection_store(struct device_driver *drv, const char *buf, - size_t count) -{ - char username[9]; - char userdata[17]; - int rc; - struct net_device *dev; - struct netiucv_priv *priv; - struct iucv_connection *cp; - - IUCV_DBF_TEXT(trace, 3, __func__); - rc = netiucv_check_user(buf, count, username, userdata); - if (rc) - return rc; - - read_lock_bh(&iucv_connection_rwlock); - list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && - !strncmp(userdata, cp->userdata, 17)) { - read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " - "already exists\n", netiucv_printuser(cp)); - return -EEXIST; - } - } - read_unlock_bh(&iucv_connection_rwlock); - - dev = netiucv_init_netdevice(username, userdata); - if (!dev) { - IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); - return -ENODEV; - } - - rc = netiucv_register_device(dev); - if (rc) { - rtnl_unlock(); - IUCV_DBF_TEXT_(setup, 2, - "ret %d from netiucv_register_device\n", rc); - goto out_free_ndev; - } - - /* sysfs magic */ - priv = netdev_priv(dev); - SET_NETDEV_DEV(dev, priv->dev); - - rc = register_netdevice(dev); - rtnl_unlock(); - if (rc) - goto out_unreg; - - dev_info(priv->dev, "The IUCV interface to %s has been established " - "successfully\n", - netiucv_printuser(priv->conn)); - - return count; - -out_unreg: - netiucv_unregister_device(priv->dev); -out_free_ndev: - netiucv_free_netdevice(dev); - return rc; -} -static DRIVER_ATTR_WO(connection); - -static ssize_t remove_store(struct device_driver *drv, const char *buf, - size_t count) -{ - struct iucv_connection *cp; - struct net_device *ndev; - struct netiucv_priv *priv; - struct device *dev; - char name[IFNAMSIZ]; - const char *p; - int i; - - IUCV_DBF_TEXT(trace, 3, __func__); - - if (count >= IFNAMSIZ) - count = IFNAMSIZ - 1; - - for (i = 0, p = buf; i < count && *p; i++, p++) { - if (*p == '\n' || *p == ' ') - /* trailing lf, grr */ - break; - name[i] = *p; - } - name[i] = '\0'; - - read_lock_bh(&iucv_connection_rwlock); - list_for_each_entry(cp, &iucv_connection_list, list) { - ndev = cp->netdev; - priv = netdev_priv(ndev); - dev = priv->dev; - if (strncmp(name, ndev->name, count)) - continue; - read_unlock_bh(&iucv_connection_rwlock); - if (ndev->flags & (IFF_UP | IFF_RUNNING)) { - dev_warn(dev, "The IUCV device is connected" - " to %s and cannot be removed\n", - priv->conn->userid); - IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); - return -EPERM; - } - unregister_netdev(ndev); - netiucv_unregister_device(dev); - return count; - } - 
read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); - return -EINVAL; -} -static DRIVER_ATTR_WO(remove); - -static struct attribute * netiucv_drv_attrs[] = { - &driver_attr_connection.attr, - &driver_attr_remove.attr, - NULL, -}; - -static struct attribute_group netiucv_drv_attr_group = { - .attrs = netiucv_drv_attrs, -}; - -static const struct attribute_group *netiucv_drv_attr_groups[] = { - &netiucv_drv_attr_group, - NULL, -}; - -static void netiucv_banner(void) -{ - pr_info("driver initialized\n"); -} - -static void __exit netiucv_exit(void) -{ - struct iucv_connection *cp; - struct net_device *ndev; - struct netiucv_priv *priv; - struct device *dev; - - IUCV_DBF_TEXT(trace, 3, __func__); - while (!list_empty(&iucv_connection_list)) { - cp = list_entry(iucv_connection_list.next, - struct iucv_connection, list); - ndev = cp->netdev; - priv = netdev_priv(ndev); - dev = priv->dev; - - unregister_netdev(ndev); - netiucv_unregister_device(dev); - } - - driver_unregister(&netiucv_driver); - iucv_unregister(&netiucv_handler, 1); - iucv_unregister_dbf_views(); - - pr_info("driver unloaded\n"); - return; -} - -static int __init netiucv_init(void) -{ - int rc; - - rc = iucv_register_dbf_views(); - if (rc) - goto out; - rc = iucv_register(&netiucv_handler, 1); - if (rc) - goto out_dbf; - IUCV_DBF_TEXT(trace, 3, __func__); - netiucv_driver.groups = netiucv_drv_attr_groups; - rc = driver_register(&netiucv_driver); - if (rc) { - IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); - goto out_iucv; - } - - netiucv_banner(); - return rc; - -out_iucv: - iucv_unregister(&netiucv_handler, 1); -out_dbf: - iucv_unregister_dbf_views(); -out: - return rc; -} - -module_init(netiucv_init); -module_exit(netiucv_exit); -MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 613eab729704..41fe8a043d61 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -956,7 +956,7 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, struct dst_entry *dst = skb_dst(skb); struct rt6_info *rt; - rt = (struct rt6_info *) dst; + rt = dst_rt6_info(dst); if (dst) { if (proto == htons(ETH_P_IPV6)) dst = dst_check(dst, rt6_get_cookie(rt)); @@ -970,15 +970,14 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb, struct dst_entry *dst) { - struct rtable *rt = (struct rtable *) dst; - - return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr; + return (dst) ? 
rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) : + ip_hdr(skb)->daddr; } static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb, struct dst_entry *dst) { - struct rt6_info *rt = (struct rt6_info *) dst; + struct rt6_info *rt = dst_rt6_info(dst); if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) return &rt->rt6i_gateway; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index cd783290bde5..1c80e8ca67b5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -7,10 +7,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ -#define KMSG_COMPONENT "qeth" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "qeth: " fmt -#include <linux/compat.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> @@ -364,30 +363,33 @@ out: return rc; } +static void qeth_free_cq(struct qeth_card *card) +{ + if (card->qdio.c_q) { + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; + } +} + static int qeth_alloc_cq(struct qeth_card *card) { if (card->options.cq == QETH_CQ_ENABLED) { QETH_CARD_TEXT(card, 2, "cqon"); - card->qdio.c_q = qeth_alloc_qdio_queue(); if (!card->qdio.c_q) { - dev_err(&card->gdev->dev, "Failed to create completion queue\n"); - return -ENOMEM; + card->qdio.c_q = qeth_alloc_qdio_queue(); + if (!card->qdio.c_q) { + dev_err(&card->gdev->dev, + "Failed to create completion queue\n"); + return -ENOMEM; + } } } else { QETH_CARD_TEXT(card, 2, "nocq"); - card->qdio.c_q = NULL; + qeth_free_cq(card); } return 0; } -static void qeth_free_cq(struct qeth_card *card) -{ - if (card->qdio.c_q) { - qeth_free_qdio_queue(card->qdio.c_q); - card->qdio.c_q = NULL; - } -} - static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, int delayed) { @@ -426,7 +428,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, ccw->cmd_code = cmd_code; ccw->flags = flags | CCW_FLAG_SLI; ccw->count = len; - ccw->cda = (__u32)virt_to_phys(data); + ccw->cda = virt_to_dma32(data); } static int __qeth_issue_next_read(struct qeth_card *card) @@ -757,7 +759,7 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, if (rc) QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", ipa_name, com, CARD_DEVID(card), rc, - qeth_get_ipa_msg(rc)); + qeth_get_ipa_msg(com, rc)); else QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", ipa_name, com, CARD_DEVID(card)); @@ -1179,6 +1181,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, } } +/** + * qeth_irq() - qeth interrupt handler + * @cdev: ccw device + * @intparm: expect pointer to iob + * @irb: Interruption Response Block + * + * In the good path: + * corresponding qeth channel is locked with last used iob as active_cmd. + * But this function is also called for error interrupts. + * + * Caller ensures that: + * Interrupts are disabled; ccw device lock is held; + * + */ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { @@ -1220,11 +1236,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, iob = (struct qeth_cmd_buffer *) (addr_t)intparm; } - qeth_unlock_channel(card, channel); - rc = qeth_check_irb_error(card, cdev, irb); if (rc) { /* IO was terminated, free its resources. 
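/*
 * Several hunks in this file replace raw physical-address conversions
 * with the typed s390 DMA helpers (dma32_t/dma64_t), so that 31-bit
 * and 64-bit address fields carry their width in the type:
 *
 *   ccw->cda = virt_to_dma32(data);       was: (__u32)virt_to_phys(data)
 *   element->addr = virt_to_dma64(data);  was: virt_to_phys(data)
 *   data = dma64_to_virt(element->addr);  was: phys_to_virt(addr)
 *
 * The helper names are taken from the diff itself; "element" and
 * "data" stand in for the respective CCW and QDIO buffer fields.
 */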
*/ + qeth_unlock_channel(card, channel); if (iob) qeth_cancel_cmd(iob, rc); return; @@ -1268,6 +1283,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; + qeth_unlock_channel(card, channel); if (iob) qeth_cancel_cmd(iob, rc); qeth_clear_ipacmd_list(card); @@ -1276,6 +1292,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } } + if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) { + /* channel command hasn't started: retry. + * active_cmd is still set to last iob + */ + QETH_CARD_TEXT(card, 2, "irqcc1"); + rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob), + (addr_t)iob, 0, 0, iob->timeout); + if (rc) { + QETH_DBF_MESSAGE(2, + "ccw retry on %x failed, rc = %i\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); + qeth_unlock_channel(card, channel); + qeth_cancel_cmd(iob, rc); + } + return; + } + + qeth_unlock_channel(card, channel); + if (iob) { /* sanity check: */ if (irb->scsw.cmd.count > iob->length) { @@ -1359,7 +1395,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_tx_complete_buf(queue, buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { - void *data = phys_to_virt(buf->buffer->element[i].addr); + void *data = dma64_to_virt(buf->buffer->element[i].addr); if (__test_and_clear_bit(i, buf->from_kmem_cache) && data) kmem_cache_free(qeth_core_header_cache, data); @@ -1404,7 +1440,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card, for (i = 0; i < aob->sb_count && i < queue->max_elements; i++) { - void *data = phys_to_virt(aob->sba[i]); + void *data = dma64_to_virt(aob->sba[i]); if (test_bit(i, buf->from_kmem_cache) && data) kmem_cache_free(qeth_core_header_cache, @@ -2582,7 +2618,8 @@ err_qdio_bufs: static void qeth_tx_completion_timer(struct timer_list *timer) { - struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); + struct qeth_qdio_out_q *queue = timer_container_of(queue, timer, + timer); napi_schedule(&queue->napi); QETH_TXQ_STAT_INC(queue, completion_timer); @@ -2594,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "allcqdbf"); + /* completion */ + if (qeth_alloc_cq(card)) + goto out_err; + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; @@ -2629,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; } - /* completion */ - if (qeth_alloc_cq(card)) - goto out_freeoutq; - return 0; out_freeoutq: @@ -2643,6 +2680,8 @@ out_freeoutq: qeth_free_buffer_pool(card); out_buffer_pool: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + qeth_free_cq(card); +out_err: return -ENOMEM; } @@ -2650,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card) { int i, j; + qeth_free_cq(card); + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; - qeth_free_cq(card); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) { consume_skb(card->qdio.in_q->bufs[j].rx_skb); @@ -2918,8 +2958,8 @@ static int qeth_init_input_buffer(struct qeth_card *card, */ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; - buf->buffer->element[i].addr = - page_to_phys(pool_entry->elements[i]); + buf->buffer->element[i].addr = u64_to_dma64( + 
page_to_phys(pool_entry->elements[i])); if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else @@ -3675,7 +3715,7 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue) static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { /* - * check if weed have to switch to non-packing mode or if + * check if we have to switch to non-packing mode or if * we have to get a pci flag out on the queue */ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || @@ -3708,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) { - int rc; - - if (card->options.cq == QETH_CQ_NOTAVAILABLE) { - rc = -1; - goto out; - } else { - if (card->options.cq == cq) { - rc = 0; - goto out; - } - - qeth_free_qdio_queues(card); - card->options.cq = cq; - rc = 0; - } -out: - return rc; + if (card->options.cq == QETH_CQ_NOTAVAILABLE) + return -1; + card->options.cq = cq; + return 0; } EXPORT_SYMBOL_GPL(qeth_configure_cq); @@ -3765,9 +3792,9 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { - unsigned long phys_aob_addr = buffer->element[e].addr; + dma64_t phys_aob_addr = buffer->element[e].addr; - qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); + qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr)); ++e; } qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); @@ -4042,7 +4069,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, if (hd_len) { is_first_elem = false; - buffer->element[element].addr = virt_to_phys(hdr); + buffer->element[element].addr = virt_to_dma64(hdr); buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; @@ -4063,7 +4090,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = virt_to_phys(data); + buffer->element[element].addr = virt_to_dma64(data); buffer->element[element].length = elem_length; length -= elem_length; if (is_first_elem) { @@ -4093,7 +4120,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = virt_to_phys(data); + buffer->element[element].addr = virt_to_dma64(data); buffer->element[element].length = elem_length; buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; @@ -4776,8 +4803,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); if (!rc) { - tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : - u64_to_user_ptr(oat_data.ptr); + tmp = u64_to_user_ptr(oat_data.ptr); oat_data.response_len = priv.response_len; if (copy_to_user(tmp, priv.buffer, priv.response_len) || @@ -5569,7 +5595,7 @@ next_packet: offset = 0; } - hdr = phys_to_virt(element->addr) + offset; + hdr = dma64_to_virt(element->addr) + offset; offset += sizeof(*hdr); skb = NULL; @@ -5661,7 +5687,7 @@ use_skb: walk_packet: while (skb_len) { int data_len = min(skb_len, (int)(element->length - offset)); - char *data = phys_to_virt(element->addr) + offset; + char *data = dma64_to_virt(element->addr) + offset; skb_len -= data_len; offset += data_len; @@ -6226,7 +6252,7 @@ static int qeth_add_dbf_entry(struct qeth_card *card, char *name) new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); if (!new_entry) goto err_dbg; - strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); + strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name)); new_entry->dbf_info = card->debug; mutex_lock(&qeth_dbf_list_mutex); list_add(&new_entry->dbf_list, &qeth_dbf_list); @@ -7023,14 +7049,16 @@ int qeth_open(struct net_device *dev) card->data.state = CH_STATE_UP; netif_tx_start_all_queues(dev); - local_bh_disable(); qeth_for_each_output_queue(card, queue, i) { netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll); napi_enable(&queue->napi); - napi_schedule(&queue->napi); } - napi_enable(&card->napi); + + local_bh_disable(); + qeth_for_each_output_queue(card, queue, i) { + napi_schedule(&queue->napi); + } napi_schedule(&card->napi); /* kick-start the NAPI softirq: */ local_bh_enable(); @@ -7059,7 +7087,7 @@ int qeth_stop(struct net_device *dev) netif_tx_disable(dev); qeth_for_each_output_queue(card, queue, i) { - del_timer_sync(&queue->timer); + timer_delete_sync(&queue->timer); /* Queues may get re-allocated, so remove the NAPIs. 
*/ netif_napi_del(&queue->napi); } diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index d9266f7d8187..1add124e033b 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -139,82 +139,237 @@ struct ipa_rc_msg { const char *msg; }; -static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { +static const struct ipa_rc_msg qeth_ipa_rc_def_msg[] = { {IPA_RC_SUCCESS, "success"}, {IPA_RC_NOTSUPP, "Command not supported"}, - {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, - {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"}, - {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"}, - {IPA_RC_INVALID_FORMAT, "invalid format or length"}, {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"}, - {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"}, {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"}, {IPA_RC_UNREGISTERED_ADDR, "Address not registered"}, - {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"}, {IPA_RC_ID_NOT_FOUND, "Identifier not found"}, - {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, - {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"}, - {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, - {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"}, - {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"}, {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"}, - {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"}, {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"}, - {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"}, {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"}, - {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"}, - {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"}, - {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"}, {IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"}, - {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"}, - {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, - {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, - {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"}, - {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"}, - {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"}, - {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"}, - {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, - {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"}, - {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, - {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"}, - {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"}, - {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"}, {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"}, {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"}, {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"}, {IPA_RC_INVALID_LANNUM, "Invalid LAN num"}, - {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"}, - {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"}, {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"}, {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, - {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, - {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}, {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, 
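/*
 * Why the monolithic message table is being split here: several IPA
 * return codes are numerically overloaded, so the right message depends
 * on which command failed. The qeth_core_mpc.h hunk further below makes
 * this explicit, e.g. (values taken from that hunk):
 *
 *	IPA_RC_IP_TABLE_FULL       = 0x0002	// generic meaning
 *	IPA_RC_INVALID_SUBCMD      = 0x0002	// same value, SET_DIAG_ASS
 *	IPA_RC_UNKNOWN_ERROR       = 0x0003
 *	IPA_RC_HARDWARE_AUTH_ERROR = 0x0003	// same value, SET_DIAG_ASS
 *
 * Hence qeth_get_ipa_msg() now takes the command as an extra argument
 * and consults a per-command table before the shared default one.
 */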
{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, - {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"}, - {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"}, - {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, - {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}, - {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, - {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"}, {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, /* default for qeth_get_ipa_msg(): */ {IPA_RC_FFFF, "Unknown Error"} }; -const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) +static const struct ipa_rc_msg qeth_ipa_rc_adp_parms_msg[] = { + {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_diag_ass_msg[] = { + {IPA_RC_INVALID_FORMAT, "invalid format or length"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_addr_msg[] = { + {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, + {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"}, + {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_vnicc_msg[] = { + {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"}, + {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_sbp_iqd_msg[] = { + {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_SBP_IQD_OS_MISMATCH, "OS mismatch"}, + {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"}, + {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + {IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"}, + {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_sbp_osa_msg[] = { + {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"}, + {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"}, + {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"}, + {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_mac_msg[] = { + {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"}, + {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"}, + {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"}, + {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"}, + {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_ip_msg[] = { + {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"}, + {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"}, + {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, + {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_lan_msg[] = { + {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, + {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. 
switch disabled port mode RR"}, +}; + +static const struct ipa_rc_msg qeth_ipa_rc_vlan_msg[] = { + {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, + {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, + {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"} +}; + +static const struct ipa_rc_msg qeth_ipa_rc_rtg_msg[] = { + {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"}, + {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"}, + {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, + {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"} +}; + +struct ipa_cmd_rc_map { + enum qeth_ipa_cmds cmd; + const struct ipa_rc_msg *msg_arr; + const size_t arr_len; +}; + +static const struct ipa_cmd_rc_map qeth_ipa_cmd_rc_map[] = { + { + .cmd = IPA_CMD_SETADAPTERPARMS, + .msg_arr = qeth_ipa_rc_adp_parms_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_adp_parms_msg) + }, + { + .cmd = IPA_CMD_SET_DIAG_ASS, + .msg_arr = qeth_ipa_rc_diag_ass_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_diag_ass_msg) + }, + { + .cmd = IPA_CMD_CREATE_ADDR, + .msg_arr = qeth_ipa_rc_addr_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_addr_msg) + }, + { + .cmd = IPA_CMD_DESTROY_ADDR, + .msg_arr = qeth_ipa_rc_addr_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_addr_msg) + }, + { + .cmd = IPA_CMD_VNICC, + .msg_arr = qeth_ipa_rc_vnicc_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_vnicc_msg) + }, + { + .cmd = IPA_CMD_SETBRIDGEPORT_IQD, + .msg_arr = qeth_ipa_rc_sbp_iqd_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_sbp_iqd_msg) + }, + { + .cmd = IPA_CMD_SETBRIDGEPORT_OSA, + .msg_arr = qeth_ipa_rc_sbp_osa_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_sbp_osa_msg) + }, + { + .cmd = IPA_CMD_SETVMAC, + .msg_arr = qeth_ipa_rc_mac_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg) + }, + { + .cmd = IPA_CMD_DELVMAC, + .msg_arr = qeth_ipa_rc_mac_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg) + }, + { + .cmd = IPA_CMD_SETGMAC, + .msg_arr = qeth_ipa_rc_mac_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg) + }, + { + .cmd = IPA_CMD_DELGMAC, + .msg_arr = qeth_ipa_rc_mac_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg) + }, + { + .cmd = IPA_CMD_SETIP, + .msg_arr = qeth_ipa_rc_ip_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg) + }, + { + .cmd = IPA_CMD_SETIPM, + .msg_arr = qeth_ipa_rc_ip_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg) + }, + { + .cmd = IPA_CMD_DELIPM, + .msg_arr = qeth_ipa_rc_ip_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg) + }, + { + .cmd = IPA_CMD_STARTLAN, + .msg_arr = qeth_ipa_rc_lan_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_lan_msg) + }, + { + .cmd = IPA_CMD_STOPLAN, + .msg_arr = qeth_ipa_rc_lan_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_lan_msg) + }, + { + .cmd = IPA_CMD_SETVLAN, + .msg_arr = qeth_ipa_rc_vlan_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_vlan_msg) + }, + { + .cmd = IPA_CMD_DELVLAN, + .msg_arr = qeth_ipa_rc_vlan_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_vlan_msg) + }, + { + .cmd = IPA_CMD_SETRTG, + .msg_arr = qeth_ipa_rc_rtg_msg, + .arr_len = ARRAY_SIZE(qeth_ipa_rc_rtg_msg) + } +}; + +const char *qeth_get_ipa_msg(enum qeth_ipa_cmds cmd, + enum qeth_ipa_return_codes rc) { int x; + const struct ipa_rc_msg *msg_arr = NULL; + size_t arr_len = 0; - for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++) - if (qeth_ipa_rc_msg[x].rc == rc) - return qeth_ipa_rc_msg[x].msg; - return qeth_ipa_rc_msg[x].msg; -} + for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_rc_map); x++) { + if (qeth_ipa_cmd_rc_map[x].cmd == cmd) { + msg_arr = qeth_ipa_cmd_rc_map[x].msg_arr; + arr_len = qeth_ipa_cmd_rc_map[x].arr_len; + break; + } + } + for 
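/*
 * Lookup strategy, as implemented here: first map the command to its
 * specific rc table via qeth_ipa_cmd_rc_map[], scan that table, and only
 * then fall back to qeth_ipa_rc_def_msg[], whose final IPA_RC_FFFF entry
 * ("Unknown Error") acts as the catch-all. Roughly:
 *
 *	msg = lookup(per_cmd_table(cmd), rc);	// most specific first
 *	if (!msg)
 *		msg = lookup(default_table, rc);	// shared codes
 *	if (!msg)
 *		msg = "Unknown Error";		// IPA_RC_FFFF sentinel
 */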
(x = 0; x < arr_len; x++) { + if (msg_arr[x].rc == rc) + return msg_arr[x].msg; + } + + for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_def_msg) - 1; x++) { + if (qeth_ipa_rc_def_msg[x].rc == rc) + return qeth_ipa_rc_def_msg[x].msg; + } + return qeth_ipa_rc_def_msg[x].msg; +} struct ipa_cmd_names { enum qeth_ipa_cmds cmd; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 6257f00786b3..252fc84e6eca 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -159,13 +159,17 @@ enum qeth_ipa_return_codes { IPA_RC_SUCCESS = 0x0000, IPA_RC_NOTSUPP = 0x0001, IPA_RC_IP_TABLE_FULL = 0x0002, + IPA_RC_INVALID_SUBCMD = 0x0002, IPA_RC_UNKNOWN_ERROR = 0x0003, + IPA_RC_HARDWARE_AUTH_ERROR = 0x0003, IPA_RC_UNSUPPORTED_COMMAND = 0x0004, IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005, + IPA_RC_VNICC_OOSEQ = 0x0005, IPA_RC_INVALID_FORMAT = 0x0006, IPA_RC_DUP_IPV6_REMOTE = 0x0008, IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C, IPA_RC_DUP_IPV6_HOME = 0x0010, + IPA_RC_SBP_IQD_OS_MISMATCH = 0x0010, IPA_RC_UNREGISTERED_ADDR = 0x0011, IPA_RC_NO_ID_AVAILABLE = 0x0012, IPA_RC_ID_NOT_FOUND = 0x0013, @@ -173,6 +177,7 @@ enum qeth_ipa_return_codes { IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018, IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C, IPA_RC_INVALID_IP_VERSION = 0x0020, + IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN = 0x0020, IPA_RC_SBP_IQD_CURRENT_PRIMARY = 0x0024, IPA_RC_LAN_FRAME_MISMATCH = 0x0040, IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB, @@ -220,16 +225,6 @@ enum qeth_ipa_return_codes { IPA_RC_INVALID_IP_VERSION2 = 0xf001, IPA_RC_FFFF = 0xffff }; -/* for VNIC Characteristics */ -#define IPA_RC_VNICC_OOSEQ 0x0005 - -/* for SET_DIAGNOSTIC_ASSIST */ -#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL -#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR - -/* for SETBRIDGEPORT (double occupancies) */ -#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME -#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION /* IPA function flags; each flag marks availability of respective function */ enum qeth_ipa_funcs { @@ -862,8 +857,9 @@ enum qeth_ipa_arp_return_codes { QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, }; -extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); -extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); +const char *qeth_get_ipa_msg(enum qeth_ipa_cmds cmd, + enum qeth_ipa_return_codes rc); +const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); /* Helper functions */ #define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index eea93f8f106f..a3b16d4d16fb 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -7,8 +7,7 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ -#define KMSG_COMPONENT "qeth" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "qeth: " fmt #include <linux/list.h> #include <linux/rwsem.h> @@ -518,28 +517,32 @@ static ssize_t qeth_hw_trap_store(struct device *dev, if (qeth_card_hw_is_reachable(card)) state = 1; - if (sysfs_streq(buf, "arm") && !card->info.hwtrap) { - if (state) { + if (sysfs_streq(buf, "arm")) { + if (state && !card->info.hwtrap) { if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM); if (!rc) card->info.hwtrap = 1; - } else + } else { rc = -EINVAL; - } else + } + } else { card->info.hwtrap = 1; - } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) { - if (state) { + } + } else if (sysfs_streq(buf, "disarm")) { 
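/*
 * Semantic change worth noting in this hunk: "arm" and "disarm" are now
 * idempotent. Re-arming an already armed trap (or disarming a disarmed
 * one) simply records the state and succeeds, instead of falling through
 * to the final else and returning -EINVAL; the hardware is only told
 * about actual transitions while the card is reachable. Only "trap"
 * still requires a reachable, armed card.
 */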
+ if (state && card->info.hwtrap) { rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); if (!rc) card->info.hwtrap = 0; - } else + } else { card->info.hwtrap = 0; - } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap) + } + } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap) { rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE); - else + } else { rc = -EINVAL; + } mutex_unlock(&card->conf_mutex); return rc ? rc : count; diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index c1caf7734c3e..d214a889cf4e 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -3,8 +3,7 @@ * Copyright IBM Corp. 2018 */ -#define KMSG_COMPONENT "qeth" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "qeth: " fmt #include <linux/ethtool.h> #include "qeth_core.h" @@ -247,7 +246,7 @@ static int qeth_set_channels(struct net_device *dev, } static int qeth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct qeth_card *card = dev->ml_priv; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 75910c0bcc2b..7498a83b1f06 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -7,9 +7,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ -#define KMSG_COMPONENT "qeth" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "qeth: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> @@ -22,6 +22,7 @@ #include <linux/hash.h> #include <linux/hashtable.h> #include <net/switchdev.h> +#include <asm/machine.h> #include <asm/chsc.h> #include <asm/css_chars.h> #include <asm/setup.h> @@ -299,7 +300,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "l2reqmac"); - if (MACHINE_IS_VM) { + if (machine_is_vm()) { rc = qeth_vm_request_mac(card); if (!rc) goto out; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b92a32b4b114..027bc346232f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -7,9 +7,9 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ -#define KMSG_COMPONENT "qeth" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "qeth: " fmt +#include <linux/export.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/bitops.h> @@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) if (!recover) { hash_del(&addr->hnode); kfree(addr); - continue; + } else { + /* prepare for recovery */ + addr->disp_flag = QETH_DISP_ADDR_ADD; } - addr->disp_flag = QETH_DISP_ADDR_ADD; } mutex_unlock(&card->ip_lock); @@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card) if (addr->disp_flag == QETH_DISP_ADDR_ADD) { rc = qeth_l3_register_addr_entry(card, addr); - if (!rc) { + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { + /* keep it in the records */ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { + /* bad address */ hash_del(&addr->hnode); kfree(addr); } diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index c84ec2fbf99b..3dadaacc42a6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -7,12 +7,14 @@ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/device.h> #include 
<linux/slab.h> #include <net/iucv/iucv.h> +#include <asm/machine.h> #include <asm/cpcmd.h> #include <asm/ebcdic.h> #include "smsgiucv.h" @@ -138,7 +140,7 @@ static int __init smsg_init(void) { int rc; - if (!MACHINE_IS_VM) { + if (!machine_is_vm()) { rc = -EPROTONOSUPPORT; goto out; } diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c index 0a263999f7ae..1bd0370460cd 100644 --- a/drivers/s390/net/smsgiucv_app.c +++ b/drivers/s390/net/smsgiucv_app.c @@ -10,8 +10,7 @@ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> * */ -#define KMSG_COMPONENT "smsgiucv_app" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "smsgiucv_app: " fmt #include <linux/ctype.h> #include <linux/err.h> @@ -23,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <net/iucv/iucv.h> +#include <asm/machine.h> #include "smsgiucv.h" /* prefix used for SMSG registration */ @@ -87,9 +87,10 @@ static struct smsg_app_event *smsg_app_event_alloc(const char *from, ev->envp[3] = NULL; /* setting up environment: sender, prefix name, and message text */ - snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); - snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX); - snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); + scnprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); + scnprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", + SMSG_PREFIX); + scnprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); return ev; } @@ -153,28 +154,17 @@ static int __init smsgiucv_app_init(void) struct device_driver *smsgiucv_drv; int rc; - if (!MACHINE_IS_VM) + if (!machine_is_vm()) return -ENODEV; - smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL); - if (!smsg_app_dev) - return -ENOMEM; - smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus); - if (!smsgiucv_drv) { - kfree(smsg_app_dev); + if (!smsgiucv_drv) return -ENODEV; - } - rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); - if (rc) { - kfree(smsg_app_dev); - goto fail; - } - smsg_app_dev->bus = &iucv_bus; - smsg_app_dev->parent = iucv_root; - smsg_app_dev->release = (void (*)(struct device *)) kfree; - smsg_app_dev->driver = smsgiucv_drv; + smsg_app_dev = iucv_alloc_device(NULL, smsgiucv_drv, NULL, "smsgiucv_app"); + if (!smsg_app_dev) + return -ENOMEM; + rc = device_register(smsg_app_dev); if (rc) { put_device(smsg_app_dev); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index df782646e856..01f927ae61b5 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -28,8 +28,7 @@ * Benjamin Block */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/seq_file.h> #include <linux/slab.h> @@ -41,7 +40,7 @@ #define ZFCP_BUS_ID_SIZE 20 -MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); +MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("FCP HBA driver"); MODULE_LICENSE("GPL"); @@ -312,15 +311,13 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) { - char name[TASK_COMM_LEN]; - - snprintf(name, sizeof(name), "zfcp_q_%s", - dev_name(&adapter->ccw_device->dev)); - adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + adapter->work_queue = + alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM, + dev_name(&adapter->ccw_device->dev)); + if (!adapter->work_queue) + return -ENOMEM; - if 
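/*
 * Detail on this conversion: alloc_ordered_workqueue() takes a
 * printf-style format string plus arguments, so the queue name can be
 * built in place. That drops the intermediate name[TASK_COMM_LEN]
 * buffer, which could silently truncate "zfcp_q_<busid>". Usage as in
 * the hunk here:
 *
 *	wq = alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM,
 *				     dev_name(&adapter->ccw_device->dev));
 *	if (!wq)
 *		return -ENOMEM;
 */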
(adapter->work_queue) - return 0; - return -ENOMEM; + return 0; } static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) @@ -518,12 +515,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (port) { put_device(&port->dev); retval = -EEXIST; - goto err_out; + goto err_put; } port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - goto err_out; + goto err_put; rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); @@ -546,7 +543,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); - goto err_out; + goto err_put; } retval = -EINVAL; @@ -563,7 +560,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, return port; -err_out: +err_put: zfcp_ccw_adapter_put(adapter); +err_out: return ERR_PTR(retval); } diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index bdf2cc1ea713..67cb947048c4 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2002, 2010 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/module.h> #include "zfcp_ext.h" diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index d904625afd40..6b5561c54e2f 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2002, 2023 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/module.h> #include <linux/ctype.h> diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 78d52a4c55f5..ec6c0e102119 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2002, 2020 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/kthread.h> #include <linux/bug.h> @@ -615,7 +614,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) */ void zfcp_erp_timeout_handler(struct timer_list *t) { - struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); + struct zfcp_fsf_req *fsf_req = timer_container_of(fsf_req, t, timer); struct zfcp_erp_action *act; if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) @@ -629,7 +628,7 @@ void zfcp_erp_timeout_handler(struct timer_list *t) static void zfcp_erp_memwait_handler(struct timer_list *t) { - struct zfcp_erp_action *act = from_timer(act, t, timer); + struct zfcp_erp_action *act = timer_container_of(act, t, timer); zfcp_erp_notify(act, 0); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 4f0d0e55f0d4..78ca394e1195 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 
2008, 2017 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/types.h> #include <linux/slab.h> @@ -537,6 +536,11 @@ static void zfcp_fc_adisc_handler(void *data) /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + /* + * The port ref comes from get_device() in zfcp_fc_test_link(); the + * work item zfcp_fc_link_test_work() passes it on via zfcp_fc_adisc() + * to here, provided zfcp_fc_adisc() was able to send the ADISC. + */ put_device(&port->dev); kmem_cache_free(zfcp_fc_req_cache, fc_req); } @@ -603,7 +607,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) retval = zfcp_fc_adisc(port); if (retval == 0) - return; + return; /* port ref passed to zfcp_fc_adisc(), no put here */ /* send of ADISC was not possible */ atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); @@ -900,8 +904,19 @@ static void zfcp_fc_rspn(struct zfcp_adapter *adapter, zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID, FC_SYMBOLIC_NAME_SIZE); hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost)); - len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost), - FC_SYMBOLIC_NAME_SIZE); + + BUILD_BUG_ON(sizeof(rspn_req->name) != + sizeof(fc_host_symbolic_name(shost))); + BUILD_BUG_ON(sizeof(rspn_req->name) != + type_max(typeof(rspn_req->rspn.fr_name_len)) + 1); + len = strscpy(rspn_req->name, fc_host_symbolic_name(shost), + sizeof(rspn_req->name)); + /* + * It should be impossible for this to truncate (see BUILD_BUG_ON() + * above), but be robust anyway. + */ + if (WARN_ON(len < 0)) + len = sizeof(rspn_req->name) - 1; rspn_req->rspn.fr_name_len = len; sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req)); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index ceed1b6f7cb6..9418086368c3 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -7,8 +7,7 @@ * Copyright IBM Corp.
2002, 2023 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/blktrace_api.h> #include <linux/jiffies.h> @@ -36,7 +35,7 @@ MODULE_PARM_DESC(ber_stop, static void zfcp_fsf_request_timeout_handler(struct timer_list *t) { - struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); + struct zfcp_fsf_req *fsf_req = timer_container_of(fsf_req, t, timer); struct zfcp_adapter *adapter = fsf_req->adapter; zfcp_qdio_siosl(adapter); @@ -458,7 +457,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer_sync(&req->timer); + timer_delete_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -891,7 +890,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer_sync(&req->timer); + timer_delete_sync(&req->timer); /* lookup request again, list might have changed */ if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL) @@ -1218,7 +1217,7 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, /** * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) * @wka_port: pointer to zfcp WKA port to send CT/GS to - * @ct: pointer to struct zfcp_send_ct with data for request + * @ct: pointer to struct zfcp_fsf_ct_els with data for CT request * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req * @timeout: timeout that hardware should use, and a later software timeout */ @@ -1316,7 +1315,7 @@ skip_fsfstatus: * zfcp_fsf_send_els - initiate an ELS command (FC-FS) * @adapter: pointer to zfcp adapter * @d_id: N_Port_ID to send ELS to - * @els: pointer to struct zfcp_send_els with data for the command + * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command * @timeout: timeout that hardware should use, and a later software timeout */ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, @@ -2742,7 +2741,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { sbale = &sbal->element[idx]; - req_id = sbale->addr; + req_id = dma64_to_u64(sbale->addr); fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); if (!fsf_req) { diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index f54f506b02d6..e15a1eabe42d 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2002, 2020 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/lockdep.h> #include <linux/slab.h> @@ -102,7 +101,8 @@ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet) static void zfcp_qdio_request_timer(struct timer_list *timer) { - struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer); + struct zfcp_qdio *qdio = timer_container_of(qdio, timer, + request_timer); tasklet_schedule(&qdio->request_tasklet); } @@ -125,7 +125,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); sbale = qdio->res_q[idx]->element; - req_id = sbale->addr; + req_id = dma64_to_u64(sbale->addr); scount = min(sbale->scount + 1, ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); /* incl. 
signaling SBAL */ @@ -256,7 +256,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, q_req->sbal_number); return -EINVAL; } - sbale->addr = sg_phys(sg); + sbale->addr = u64_to_dma64(sg_phys(sg)); sbale->length = sg->length; } return 0; @@ -408,7 +408,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) tasklet_disable(&qdio->irq_tasklet); tasklet_disable(&qdio->request_tasklet); - del_timer_sync(&qdio->request_timer); + timer_delete_sync(&qdio->request_timer); qdio_stop_irq(adapter->ccw_device); qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 90134d9b69a7..8f7d2ae94441 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -129,14 +129,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, % QDIO_MAX_BUFFERS_PER_Q; sbale = zfcp_qdio_sbale_req(qdio, q_req); - sbale->addr = req_id; + sbale->addr = u64_to_dma64(req_id); sbale->eflags = 0; sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype; if (unlikely(!data)) return; sbale++; - sbale->addr = virt_to_phys(data); + sbale->addr = virt_to_dma64(data); sbale->length = len; } @@ -159,7 +159,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); q_req->sbale_curr++; sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->addr = virt_to_phys(data); + sbale->addr = virt_to_dma64(data); sbale->length = len; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index b2a8cd792266..141476ea21bb 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2002, 2020 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/module.h> #include <linux/types.h> @@ -37,11 +36,11 @@ static bool allow_lun_scan = true; module_param(allow_lun_scan, bool, 0600); MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); -static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) +static void zfcp_scsi_sdev_destroy(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - /* if previous slave_alloc returned early, there is nothing to do */ + /* if previous sdev_init returned early, there is nothing to do */ if (!zfcp_sdev->port) return; @@ -49,7 +48,8 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) put_device(&zfcp_sdev->port->dev); } -static int zfcp_scsi_slave_configure(struct scsi_device *sdp) +static int zfcp_scsi_sdev_configure(struct scsi_device *sdp, + struct queue_limits *lim) { if (sdp->tagged_supported) scsi_change_queue_depth(sdp, default_depth); @@ -110,7 +110,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) return ret; } -static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) +static int zfcp_scsi_sdev_init(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct zfcp_adapter *adapter = @@ -427,9 +427,9 @@ static const struct scsi_host_template zfcp_scsi_host_template = { .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, - .slave_alloc = zfcp_scsi_slave_alloc, - .slave_configure = zfcp_scsi_slave_configure, - .slave_destroy = zfcp_scsi_slave_destroy, + .sdev_init = 
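/*
 * Background for the renames in this template: the SCSI midlayer
 * replaced the legacy slave_alloc/slave_configure/slave_destroy host
 * template hooks with sdev_init/sdev_configure/sdev_destroy, and
 * sdev_configure() additionally receives a struct queue_limits * so a
 * driver can adjust limits at configure time (zfcp ignores it here).
 * The zfcp callbacks are renamed one-to-one; their logic is unchanged.
 */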
zfcp_scsi_sdev_init, + .sdev_configure = zfcp_scsi_sdev_configure, + .sdev_destroy = zfcp_scsi_sdev_destroy, .change_queue_depth = scsi_change_queue_depth, .host_reset = zfcp_scsi_sysfs_host_reset, .proc_name = "zfcp", diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index cb67fa80fb12..10a3840b2b6b 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -7,8 +7,7 @@ * Copyright IBM Corp. 2008, 2020 */ -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "zfcp: " fmt #include <linux/slab.h> #include "zfcp_diag.h" @@ -24,7 +23,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ { \ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ \ - return sprintf(buf, _format, _value); \ + return sysfs_emit(buf, _format, _value); \ } \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); @@ -34,7 +33,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ struct device_attribute *at,\ char *buf) \ { \ - return sprintf(buf, _format, _value); \ + return sysfs_emit(buf, _format, _value); \ } \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); @@ -51,7 +50,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ if (!adapter) \ return -ENODEV; \ \ - i = sprintf(buf, _format, _value); \ + i = sysfs_emit(buf, _format, _value); \ zfcp_ccw_adapter_put(adapter); \ return i; \ } \ @@ -95,9 +94,9 @@ static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return sprintf(buf, "1\n"); + return sysfs_emit(buf, "1\n"); - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); } static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, @@ -135,7 +134,7 @@ static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, scsi_device_put(sdev); } - return sprintf(buf, "%d\n", failed); + return sysfs_emit(buf, "%d\n", failed); } static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, @@ -176,9 +175,9 @@ static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, return -ENODEV; if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - i = sprintf(buf, "1\n"); + i = sysfs_emit(buf, "1\n"); else - i = sprintf(buf, "0\n"); + i = sysfs_emit(buf, "0\n"); zfcp_ccw_adapter_put(adapter); return i; @@ -284,7 +283,7 @@ static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port) goto unlock_host_lock; } - /* port is about to be removed, so no more unit_add or slave_alloc */ + /* port is about to be removed, so no more unit_add or sdev_init */ zfcp_sysfs_port_set_removing(port); in_use = false; @@ -348,8 +347,7 @@ zfcp_sysfs_adapter_diag_max_age_show(struct device *dev, if (!adapter) return -ENODEV; - /* ceil(log(2^64 - 1) / log(10)) = 20 */ - rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age); + rc = sysfs_emit(buf, "%lu\n", adapter->diagnostics->max_age); zfcp_ccw_adapter_put(adapter); return rc; @@ -401,14 +399,14 @@ static ssize_t zfcp_sysfs_adapter_fc_security_show( */ status = atomic_read(&adapter->status); if (0 == (status & ZFCP_STATUS_COMMON_OPEN)) - i = sprintf(buf, "unknown\n"); + i = sysfs_emit(buf, "unknown\n"); else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY)) - i = sprintf(buf, "unsupported\n"); + i = sysfs_emit(buf, "unsupported\n"); else { i = 
zfcp_fsf_scnprint_fc_security( buf, PAGE_SIZE - 1, adapter->fc_security_algorithms, ZFCP_FSF_PRINT_FMT_LIST); - i += scnprintf(buf + i, PAGE_SIZE - i, "\n"); + i += sysfs_emit_at(buf, i, "\n"); } zfcp_ccw_adapter_put(adapter); @@ -450,6 +448,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; + flush_work(&port->rport_work); + retval = zfcp_unit_add(port, fcp_lun); if (retval) return retval; @@ -490,14 +490,14 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev, 0 != (status & ZFCP_STATUS_PORT_LINK_TEST) || 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) || 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) - i = sprintf(buf, "unknown\n"); + i = sysfs_emit(buf, "unknown\n"); else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY)) - i = sprintf(buf, "unsupported\n"); + i = sysfs_emit(buf, "unsupported\n"); else { i = zfcp_fsf_scnprint_fc_security( buf, PAGE_SIZE - 1, port->connection_info, ZFCP_FSF_PRINT_FMT_SINGLEITEM); - i += scnprintf(buf + i, PAGE_SIZE - i, "\n"); + i += sysfs_emit_at(buf, i, "\n"); } return i; @@ -569,8 +569,8 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ do_div(cmin, 1000); \ do_div(cmax, 1000); \ \ - return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \ - fmin, fmax, fsum, cmin, cmax, csum, cc); \ + return sysfs_emit(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \ + fmin, fmax, fsum, cmin, cmax, csum, cc); \ } \ static ssize_t \ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ @@ -610,8 +610,8 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ struct scsi_device *sdev = to_scsi_device(dev); \ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ \ - return sprintf(buf, _format, _value); \ -} \ + return sysfs_emit(buf, _format, _value); \ +} \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", @@ -625,7 +625,7 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, { struct scsi_device *sdev = to_scsi_device(dev); - return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); + return sysfs_emit(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); } static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); @@ -641,7 +641,7 @@ static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev, unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status); unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 
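/*
 * On the sprintf() -> sysfs_emit() conversions in these show()
 * routines: sysfs_emit() knows the buffer is a full PAGE_SIZE sysfs
 * page and checks bounds, and sysfs_emit_at() continues at an offset,
 * replacing the scnprintf(buf + i, PAGE_SIZE - i, ...) idiom. A minimal
 * sketch with a hypothetical attribute:
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		int i = sysfs_emit(buf, "%d", 42);
 *
 *		return i + sysfs_emit_at(buf, i, "\n");
 *	}
 */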
1 : 0; - return sprintf(buf, "%d\n", failed); + return sysfs_emit(buf, "%d\n", failed); } static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev, @@ -714,8 +714,8 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); if (retval == 0 || retval == -EAGAIN) - retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, - qtcb_port->cb_util, qtcb_port->a_util); + retval = sysfs_emit(buf, "%u %u %u\n", qtcb_port->cp_util, + qtcb_port->cb_util, qtcb_port->a_util); kfree(qtcb_port); return retval; } @@ -758,7 +758,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ if (retval) \ return retval; \ \ - return sprintf(buf, _format, ## _arg); \ + return sysfs_emit(buf, _format, ## _arg); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); @@ -787,8 +787,8 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, util = qdio->req_q_util; spin_unlock_bh(&qdio->stat_lock); - return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), - (unsigned long long)util); + return sysfs_emit(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), + (unsigned long long)util); } static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); @@ -843,8 +843,7 @@ static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show( .data.nport_serv_param - sizeof(u32)); - rc = scnprintf(buf, 5 + 2, "%hu\n", - be16_to_cpu(nsp->fl_csp.sp_bb_cred)); + rc = sysfs_emit(buf, "%hu\n", be16_to_cpu(nsp->fl_csp.sp_bb_cred)); spin_unlock_irqrestore(&diag_hdr->access_lock, flags); out: @@ -854,7 +853,7 @@ out: static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, zfcp_sysfs_adapter_diag_b2b_credit_show, NULL); -#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \ +#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtfmt) \ static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \ struct device *dev, struct device_attribute *attr, char *buf) \ { \ @@ -887,8 +886,8 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, goto out; \ \ spin_lock_irqsave(&diag_hdr->access_lock, flags); \ - rc = scnprintf( \ - buf, (_prtsize) + 2, _prtfmt "\n", \ + rc = sysfs_emit( \ + buf, _prtfmt "\n", \ adapter->diagnostics->port_data.data._qtcb_member); \ spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \ \ @@ -899,16 +898,16 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL) -ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd"); -ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu"); -ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, "%hd"); +ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 
"%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, "%hu"); static struct attribute *zfcp_sysfs_diag_attrs[] = { &dev_attr_adapter_diag_sfp_temperature.attr, diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 60f2a04f0869..4ef2a635d34f 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -170,7 +170,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) write_unlock_irq(&port->unit_list_lock); /* * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex - * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc() + * due to zfcp_unit_scsi_scan() => zfcp_scsi_sdev_init() */ mutex_unlock(&zfcp_sysfs_port_units_mutex); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 02922768b129..4904b831c0a7 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -58,6 +58,8 @@ struct virtio_ccw_device { struct virtio_device vdev; __u8 config[VIRTIO_CCW_CONFIG_SIZE]; struct ccw_device *cdev; + /* we make cdev->dev.dma_parms point to this */ + struct device_dma_parameters dma_parms; __u32 curr_io; int err; unsigned int revision; /* Transport revision */ @@ -72,6 +74,7 @@ struct virtio_ccw_device { unsigned int config_ready; void *airq_info; struct vcdev_dma_area *dma_area; + dma32_t dma_area_addr; }; static inline unsigned long *indicators(struct virtio_ccw_device *vcdev) @@ -84,20 +87,50 @@ static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev) return &vcdev->dma_area->indicators2; } +/* Spec stipulates a 64 bit address */ +static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev) +{ + u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr); + + return dma64_add(u64_to_dma64(dma_area_addr), + offsetof(struct vcdev_dma_area, indicators)); +} + +/* Spec stipulates a 64 bit address */ +static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev) +{ + u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr); + + return dma64_add(u64_to_dma64(dma_area_addr), + offsetof(struct vcdev_dma_area, indicators2)); +} + +static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev) +{ + return dma32_add(vcdev->dma_area_addr, + offsetof(struct vcdev_dma_area, config_block)); +} + +static inline dma32_t status_dma(struct virtio_ccw_device *vcdev) +{ + return dma32_add(vcdev->dma_area_addr, + offsetof(struct vcdev_dma_area, status)); +} + struct vq_info_block_legacy { - __u64 queue; + dma64_t queue; __u32 align; __u16 index; __u16 num; } __packed; struct vq_info_block { - __u64 desc; + dma64_t desc; __u32 res0; __u16 index; __u16 num; - __u64 avail; - __u64 used; + dma64_t avail; + dma64_t used; } __packed; struct virtio_feature_desc { @@ -106,8 +139,8 @@ struct virtio_feature_desc { } __packed; struct virtio_thinint_area { - unsigned long summary_indicator; - unsigned long indicator; + dma64_t summary_indicator; + dma64_t indicator; u64 bit_nr; u8 isc; } __packed; @@ -123,6 +156,7 @@ struct virtio_rev_info { struct virtio_ccw_vq_info { struct virtqueue *vq; + dma32_t info_block_addr; int num; union { struct vq_info_block s; @@ -156,6 +190,11 @@ static inline u8 *get_summary_indicator(struct airq_info *info) return summary_indicators + info->summary_indicator_idx; } +static inline dma64_t 
get_summary_indicator_dma(struct airq_info *info) +{ + return virt_to_dma64(get_summary_indicator(info)); +} + #define CCW_CMD_SET_VQ 0x13 #define CCW_CMD_VDEV_RESET 0x33 #define CCW_CMD_SET_IND 0x43 @@ -250,7 +289,6 @@ static struct airq_info *new_airq_info(int index) info->airq.handler = virtio_airq_handler; info->summary_indicator_idx = index; info->airq.lsi_ptr = get_summary_indicator(info); - info->airq.lsi_mask = 0xff; info->airq.isc = VIRTIO_AIRQ_ISC; rc = register_adapter_interrupt(&info->airq); if (rc) { @@ -261,14 +299,20 @@ static struct airq_info *new_airq_info(int index) return info; } -static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, - u64 *first, void **airq_info) +static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, + u64 *first, void **airq_info) { - int i, j; + int i, j, queue_idx, highest_queue_idx = -1; struct airq_info *info; - unsigned long indicator_addr = 0; + unsigned long *indicator_addr = NULL; unsigned long bit, flags; + /* Array entries without an actual queue pointer must be ignored. */ + for (i = 0; i < nvqs; i++) { + if (vqs[i]) + highest_queue_idx++; + } + for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { mutex_lock(&airq_areas_lock); if (!airq_areas[i]) @@ -276,9 +320,9 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, info = airq_areas[i]; mutex_unlock(&airq_areas_lock); if (!info) - return 0; + return NULL; write_lock_irqsave(&info->lock, flags); - bit = airq_iv_alloc(info->aiv, nvqs); + bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1); if (bit == -1UL) { /* Not enough vacancies. */ write_unlock_irqrestore(&info->lock, flags); @@ -286,9 +330,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, } *first = bit; *airq_info = info; - indicator_addr = (unsigned long)info->aiv->vector; - for (j = 0; j < nvqs; j++) { - airq_iv_set_ptr(info->aiv, bit + j, + indicator_addr = info->aiv->vector; + for (j = 0, queue_idx = 0; j < nvqs; j++) { + if (!vqs[j]) + continue; + airq_iv_set_ptr(info->aiv, bit + queue_idx++, (unsigned long)vqs[j]); } write_unlock_irqrestore(&info->lock, flags); @@ -349,31 +395,31 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, struct ccw1 *ccw) { int ret; - unsigned long *indicatorp = NULL; struct virtio_thinint_area *thinint_area = NULL; struct airq_info *airq_info = vcdev->airq_info; + dma64_t *indicatorp = NULL; if (vcdev->is_thinint) { thinint_area = ccw_device_dma_zalloc(vcdev->cdev, - sizeof(*thinint_area)); + sizeof(*thinint_area), + &ccw->cda); if (!thinint_area) return; thinint_area->summary_indicator = - (unsigned long) get_summary_indicator(airq_info); + get_summary_indicator_dma(airq_info); thinint_area->isc = VIRTIO_AIRQ_ISC; ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; ccw->count = sizeof(*thinint_area); - ccw->cda = (__u32)virt_to_phys(thinint_area); } else { /* payload is the address of the indicators */ indicatorp = ccw_device_dma_zalloc(vcdev->cdev, - sizeof(indicators(vcdev))); + sizeof(*indicatorp), + &ccw->cda); if (!indicatorp) return; *indicatorp = 0; ccw->cmd_code = CCW_CMD_SET_IND; - ccw->count = sizeof(indicators(vcdev)); - ccw->cda = (__u32)virt_to_phys(indicatorp); + ccw->count = sizeof(*indicatorp); } /* Deregister indicators from host. 
*/ *indicators(vcdev) = 0; @@ -387,7 +433,7 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, "Failed to deregister indicators (%d)\n", ret); else if (vcdev->is_thinint) virtio_ccw_drop_indicators(vcdev); - ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev))); + ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp)); ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area)); } @@ -427,7 +473,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, ccw->cmd_code = CCW_CMD_READ_VQ_CONF; ccw->flags = 0; ccw->count = sizeof(struct vq_config_block); - ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block); + ccw->cda = config_block_dma(vcdev); ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); if (ret) return ret; @@ -464,7 +510,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) } ccw->cmd_code = CCW_CMD_SET_VQ; ccw->flags = 0; - ccw->cda = (__u32)virt_to_phys(info->info_block); + ccw->cda = info->info_block_addr; ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | index); /* @@ -487,7 +533,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev) struct ccw1 *ccw; struct virtio_ccw_device *vcdev = to_vc_device(vdev); - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); if (!ccw) return; @@ -526,7 +572,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, goto out_err; } info->info_block = ccw_device_dma_zalloc(vcdev->cdev, - sizeof(*info->info_block)); + sizeof(*info->info_block), + &info->info_block_addr); if (!info->info_block) { dev_warn(&vcdev->cdev->dev, "no info block\n"); err = -ENOMEM; @@ -557,22 +604,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, /* Register it with the host. 
*/ queue = virtqueue_get_desc_addr(vq); if (vcdev->revision == 0) { - info->info_block->l.queue = queue; + info->info_block->l.queue = u64_to_dma64(queue); info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN; info->info_block->l.index = i; info->info_block->l.num = info->num; ccw->count = sizeof(info->info_block->l); } else { - info->info_block->s.desc = queue; + info->info_block->s.desc = u64_to_dma64(queue); info->info_block->s.index = i; info->info_block->s.num = info->num; - info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq); - info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq); + info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq)); + info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq)); ccw->count = sizeof(info->info_block->s); } ccw->cmd_code = CCW_CMD_SET_VQ; ccw->flags = 0; - ccw->cda = (__u32)virt_to_phys(info->info_block); + ccw->cda = info->info_block_addr; err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); if (err) { dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); @@ -606,11 +653,12 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, { int ret; struct virtio_thinint_area *thinint_area = NULL; - unsigned long indicator_addr; + unsigned long *indicator_addr; struct airq_info *info; thinint_area = ccw_device_dma_zalloc(vcdev->cdev, - sizeof(*thinint_area)); + sizeof(*thinint_area), + &ccw->cda); if (!thinint_area) { ret = -ENOMEM; goto out; @@ -623,15 +671,13 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, ret = -ENOSPC; goto out; } - thinint_area->indicator = virt_to_phys((void *)indicator_addr); + thinint_area->indicator = virt_to_dma64(indicator_addr); info = vcdev->airq_info; - thinint_area->summary_indicator = - virt_to_phys(get_summary_indicator(info)); + thinint_area->summary_indicator = get_summary_indicator_dma(info); thinint_area->isc = VIRTIO_AIRQ_ISC; ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; ccw->flags = CCW_FLAG_SLI; ccw->count = sizeof(*thinint_area); - ccw->cda = (__u32)virt_to_phys(thinint_area); ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER); if (ret) { if (ret == -EOPNOTSUPP) { @@ -653,29 +699,29 @@ out: static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - const bool *ctx, + struct virtqueue_info vqs_info[], struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - unsigned long *indicatorp = NULL; + dma64_t *indicatorp = NULL; int ret, i, queue_idx = 0; struct ccw1 *ccw; + dma32_t indicatorp_dma = 0; - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); if (!ccw) return -ENOMEM; for (i = 0; i < nvqs; ++i) { - if (!names[i]) { + struct virtqueue_info *vqi = &vqs_info[i]; + + if (!vqi->name) { vqs[i] = NULL; continue; } - vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], - names[i], ctx ? ctx[i] : false, - ccw); + vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, vqi->callback, + vqi->name, vqi->ctx, ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; @@ -688,47 +734,47 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, * the address of the indicators. 
@@ -688,47 +734,47 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
         * the address of the indicators.
         */
        indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
-                                          sizeof(indicators(vcdev)));
+                                          sizeof(*indicatorp),
+                                          &indicatorp_dma);
        if (!indicatorp)
                goto out;
-       *indicatorp = (unsigned long) indicators(vcdev);
+       *indicatorp = indicators_dma(vcdev);
        if (vcdev->is_thinint) {
                ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
                if (ret)
                        /* no error, just fall back to legacy interrupts */
                        vcdev->is_thinint = false;
        }
+       ccw->cda = indicatorp_dma;
        if (!vcdev->is_thinint) {
                /* Register queue indicators with host. */
                *indicators(vcdev) = 0;
                ccw->cmd_code = CCW_CMD_SET_IND;
                ccw->flags = 0;
-               ccw->count = sizeof(indicators(vcdev));
-               ccw->cda = (__u32)virt_to_phys(indicatorp);
+               ccw->count = sizeof(*indicatorp);
                ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
                if (ret)
                        goto out;
        }
        /* Register indicators2 with host for config changes */
-       *indicatorp = (unsigned long) indicators2(vcdev);
+       *indicatorp = indicators2_dma(vcdev);
        *indicators2(vcdev) = 0;
        ccw->cmd_code = CCW_CMD_SET_CONF_IND;
        ccw->flags = 0;
-       ccw->count = sizeof(indicators2(vcdev));
-       ccw->cda = (__u32)virt_to_phys(indicatorp);
+       ccw->count = sizeof(*indicatorp);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
        if (ret)
                goto out;

        if (indicatorp)
                ccw_device_dma_free(vcdev->cdev, indicatorp,
-                                   sizeof(indicators(vcdev)));
+                                   sizeof(*indicatorp));
        ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
        return 0;
 out:
        if (indicatorp)
                ccw_device_dma_free(vcdev->cdev, indicatorp,
-                                   sizeof(indicators(vcdev)));
+                                   sizeof(*indicatorp));
        ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
        virtio_ccw_del_vqs(vdev);
        return ret;
@@ -739,7 +785,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return;
@@ -763,11 +809,12 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
        u64 rc;
        struct ccw1 *ccw;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return 0;

-       features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+       features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+                                        &ccw->cda);
        if (!features) {
                rc = 0;
                goto out_free;
@@ -777,7 +824,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
-       ccw->cda = (__u32)virt_to_phys(features);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret) {
                rc = 0;
@@ -794,7 +840,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
-       ccw->cda = (__u32)virt_to_phys(features);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret == 0)
                rc |= (u64)le32_to_cpu(features->features) << 32;
@@ -826,11 +871,12 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
                return -EINVAL;
        }

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return -ENOMEM;

-       features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+       features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+                                        &ccw->cda);
        if (!features) {
                ret = -ENOMEM;
                goto out_free;
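Several hunks swap virt_to_phys() on fields of vcdev->dma_area for helpers such as config_block_dma(), indicators_dma(), indicators2_dma() and status_dma(), whose definitions live outside the hunks shown here. One plausible shape for such a helper, assuming the patch records the DMA address of the whole dma_area at allocation time (the vcdev->dma_area_addr handle visible in the virtio_ccw_online() hunk below) and that asm/dma-types.h provides dma32_to_u32()/u64_to_dma64() conversions; demo_indicators_dma() is a sketch, not the patch's actual definition:

        /* Sketch only: derive a field's DMA address from the base DMA
         * address of the dma_area allocation plus the field offset. */
        static inline dma64_t demo_indicators_dma(struct virtio_ccw_device *vcdev)
        {
                u64 base = dma32_to_u32(vcdev->dma_area_addr);

                return u64_to_dma64(base +
                                    offsetof(struct vcdev_dma_area, indicators));
        }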
@@ -847,7 +893,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
        ccw->cmd_code = CCW_CMD_WRITE_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
-       ccw->cda = (__u32)virt_to_phys(features);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
        if (ret)
                goto out_free;
@@ -861,7 +906,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
        ccw->cmd_code = CCW_CMD_WRITE_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
-       ccw->cda = (__u32)virt_to_phys(features);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

 out_free:
@@ -880,12 +924,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        void *config_area;
        unsigned long flags;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return;

        config_area = ccw_device_dma_zalloc(vcdev->cdev,
-                                           VIRTIO_CCW_CONFIG_SIZE);
+                                           VIRTIO_CCW_CONFIG_SIZE,
+                                           &ccw->cda);
        if (!config_area)
                goto out_free;
@@ -893,7 +938,6 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        ccw->cmd_code = CCW_CMD_READ_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
-       ccw->cda = (__u32)virt_to_phys(config_area);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
        if (ret)
                goto out_free;
@@ -920,12 +964,13 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        void *config_area;
        unsigned long flags;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return;

        config_area = ccw_device_dma_zalloc(vcdev->cdev,
-                                           VIRTIO_CCW_CONFIG_SIZE);
+                                           VIRTIO_CCW_CONFIG_SIZE,
+                                           &ccw->cda);
        if (!config_area)
                goto out_free;
@@ -940,7 +985,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
-       ccw->cda = (__u32)virt_to_phys(config_area);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

 out_free:
@@ -957,14 +1001,14 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
        if (vcdev->revision < 2)
                return vcdev->dma_area->status;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return old_status;

        ccw->cmd_code = CCW_CMD_READ_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->dma_area->status);
-       ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
+       ccw->cda = status_dma(vcdev);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
        /*
         * If the channel program failed (should only happen if the device
@@ -984,7 +1028,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        struct ccw1 *ccw;
        int ret;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return;
@@ -993,11 +1037,11 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        ccw->cmd_code = CCW_CMD_WRITE_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(status);
-       ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
        /* We use ssch for setting the status which is a serializing
         * instruction that guarantees the memory writes have
         * completed before ssch.
         */
+       ccw->cda = status_dma(vcdev);
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        /* Write failed? We assume status is unchanged.
         */
        if (ret)
@@ -1269,6 +1313,7 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
        unregister_virtio_device(&vcdev->vdev);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, NULL);
+       cdev->dev.dma_parms = NULL;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return 0;
 }
@@ -1279,10 +1324,10 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
        struct ccw1 *ccw;
        int ret;

-       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
        if (!ccw)
                return -ENOMEM;
-       rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
+       rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda);
        if (!rev) {
                ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
                return -ENOMEM;
@@ -1292,7 +1337,6 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
        ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
        ccw->flags = 0;
        ccw->count = sizeof(*rev);
-       ccw->cda = (__u32)virt_to_phys(rev);

        vcdev->revision = VIRTIO_CCW_REV_MAX;
        do {
@@ -1333,8 +1377,10 @@ static int virtio_ccw_online(struct ccw_device *cdev)
        }
        vcdev->vdev.dev.parent = &cdev->dev;
        vcdev->cdev = cdev;
+       cdev->dev.dma_parms = &vcdev->dma_parms;
        vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
-                                               sizeof(*vcdev->dma_area),
+                                               sizeof(*vcdev->dma_area),
+                                               &vcdev->dma_area_addr);
        if (!vcdev->dma_area) {
                ret = -ENOMEM;
                goto out_free;
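The virtio_ccw_online()/virtio_ccw_offline() hunks bracket a lifetime rule: dev.dma_parms is pointed at storage inside the per-device vcdev when the device comes online, and must be cleared again before that storage goes away on offline. A condensed sketch of the pairing, assuming a struct device_dma_parameters member named dma_parms in struct virtio_ccw_device, as the hunks imply; the demo_* wrappers are illustrative only:

        /* Sketch only: keep dev.dma_parms valid exactly as long as the
         * vcdev that backs it. */
        static void demo_wire_dma_parms(struct ccw_device *cdev,
                                        struct virtio_ccw_device *vcdev)
        {
                /* online path: dma_parms borrows storage owned by vcdev */
                cdev->dev.dma_parms = &vcdev->dma_parms;
        }

        static void demo_unwire_dma_parms(struct ccw_device *cdev)
        {
                /* offline path: clear before vcdev is freed, so the DMA
                 * API never reads through a dangling pointer */
                cdev->dev.dma_parms = NULL;
        }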
