Diffstat (limited to 'drivers/s390/cio/device.c')
| -rw-r--r-- | drivers/s390/cio/device.c | 633 |
1 file changed, 222 insertions(+), 411 deletions(-)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 1540229a37bb..602f36102c7c 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -8,8 +8,7 @@ * Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#define KMSG_COMPONENT "cio" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "cio: " fmt #include <linux/export.h> #include <linux/init.h> @@ -24,6 +23,7 @@ #include <linux/timer.h> #include <linux/kernel_stat.h> #include <linux/sched/signal.h> +#include <linux/dma-mapping.h> #include <asm/ccwdev.h> #include <asm/cio.h> @@ -48,7 +48,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 }; static atomic_t ccw_device_init_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); -static struct bus_type ccw_bus_type; +static const struct bus_type ccw_bus_type; /******************* bus type handling ***********************/ @@ -57,10 +57,10 @@ static struct bus_type ccw_bus_type; * subsystem driver and one channel system per machine, but * we still use the abstraction. T.R. says it's a good idea. */ static int -ccw_bus_match (struct device * dev, struct device_driver * drv) +ccw_bus_match (struct device * dev, const struct device_driver * drv) { struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_driver *cdrv = to_ccwdrv(drv); + const struct ccw_driver *cdrv = to_ccwdrv(drv); const struct ccw_device_id *ids = cdrv->ids, *found; if (!ids) @@ -79,7 +79,7 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) * specified size. Return length of resulting string (excluding trailing '\0') * even if string doesn't fit buffer (snprintf semantics). */ static int snprint_alias(char *buf, size_t size, - struct ccw_device_id *id, const char *suffix) + const struct ccw_device_id *id, const char *suffix) { int len; @@ -100,10 +100,10 @@ static int snprint_alias(char *buf, size_t size, /* Set up environment variables for ccw device uevent. Return 0 on success, * non-zero otherwise. */ -static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) +static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_device_id *id = &(cdev->id); + const struct ccw_device *cdev = to_ccwdev(dev); + const struct ccw_device_id *id = &(cdev->id); int ret; char modalias_buf[30]; @@ -136,7 +136,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); -static int io_subchannel_remove(struct subchannel *); +static void io_subchannel_remove(struct subchannel *); static void io_subchannel_shutdown(struct subchannel *); static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, @@ -148,19 +148,6 @@ static struct css_device_id io_subchannel_ids[] = { { /* end of list */ }, }; -static int io_subchannel_prepare(struct subchannel *sch) -{ - struct ccw_device *cdev; - /* - * Don't allow suspend while a ccw device registration - * is still outstanding. 
- */ - cdev = sch_get_cdev(sch); - if (cdev && !device_is_registered(&cdev->dev)) - return -EAGAIN; - return 0; -} - static int io_subchannel_settle(void) { int ret; @@ -185,7 +172,6 @@ static struct css_driver io_subchannel_driver = { .probe = io_subchannel_probe, .remove = io_subchannel_remove, .shutdown = io_subchannel_shutdown, - .prepare = io_subchannel_prepare, .settle = io_subchannel_settle, }; @@ -214,10 +200,9 @@ devtype_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device_id *id = &(cdev->id); if (id->dev_type != 0) - return sprintf(buf, "%04x/%02x\n", - id->dev_type, id->dev_model); + return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model); else - return sprintf(buf, "n/a\n"); + return sysfs_emit(buf, "n/a\n"); } static ssize_t @@ -226,8 +211,7 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); - return sprintf(buf, "%04x/%02x\n", - id->cu_type, id->cu_model); + return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model); } static ssize_t @@ -247,7 +231,7 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); - return sprintf(buf, cdev->online ? "1\n" : "0\n"); + return sysfs_emit(buf, cdev->online ? "1\n" : "0\n"); } int ccw_device_is_orphan(struct ccw_device *cdev) @@ -257,10 +241,13 @@ int ccw_device_is_orphan(struct ccw_device *cdev) static void ccw_device_unregister(struct ccw_device *cdev) { + mutex_lock(&cdev->reg_mutex); if (device_is_registered(&cdev->dev)) { /* Undo device_add(). */ device_del(&cdev->dev); } + mutex_unlock(&cdev->reg_mutex); + if (cdev->private->flags.initialized) { cdev->private->flags.initialized = 0; /* Release reference from device_initialize(). */ @@ -373,10 +360,8 @@ int ccw_device_set_online(struct ccw_device *cdev) spin_lock_irq(cdev->ccwlock); ret = ccw_device_online(cdev); - spin_unlock_irq(cdev->ccwlock); - if (ret == 0) - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - else { + if (ret) { + spin_unlock_irq(cdev->ccwlock); CIO_MSG_EVENT(0, "ccw_device_online returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, @@ -385,7 +370,12 @@ int ccw_device_set_online(struct ccw_device *cdev) put_device(&cdev->dev); return ret; } - spin_lock_irq(cdev->ccwlock); + /* Wait until a final state is reached */ + while (!dev_fsm_final_state(cdev)) { + spin_unlock_irq(cdev->ccwlock); + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); + spin_lock_irq(cdev->ccwlock); + } /* Check if online processing was successful */ if ((cdev->private->state != DEV_STATE_ONLINE) && (cdev->private->state != DEV_STATE_W4SENSE)) { @@ -553,21 +543,21 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) struct subchannel *sch; if (ccw_device_is_orphan(cdev)) - return sprintf(buf, "no device\n"); + return sysfs_emit(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: - return sprintf(buf, "boxed\n"); + return sysfs_emit(buf, "boxed\n"); case DEV_STATE_DISCONNECTED: case DEV_STATE_DISCONNECTED_SENSE_ID: case DEV_STATE_NOT_OPER: sch = to_subchannel(dev->parent); if (!sch->lpm) - return sprintf(buf, "no path\n"); + return sysfs_emit(buf, "no path\n"); else - return sprintf(buf, "no device\n"); + return sysfs_emit(buf, "no device\n"); default: /* All other states considered fine. 
*/ - return sprintf(buf, "good\n"); + return sysfs_emit(buf, "good\n"); } } @@ -594,7 +584,7 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, { struct subchannel *sch = to_subchannel(dev); - return sprintf(buf, "%02x\n", sch->vpm); + return sysfs_emit(buf, "%02x\n", sch->vpm); } static DEVICE_ATTR_RO(devtype); @@ -634,18 +624,10 @@ static const struct attribute_group *ccwdev_attr_groups[] = { NULL, }; -static int ccw_device_add(struct ccw_device *cdev) -{ - struct device *dev = &cdev->dev; - - dev->bus = &ccw_bus_type; - return device_add(dev); -} - -static int match_dev_id(struct device *dev, void *data) +static int match_dev_id(struct device *dev, const void *data) { struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_dev_id *dev_id = data; + struct ccw_dev_id *dev_id = (void *)data; return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); } @@ -674,11 +656,13 @@ static void ccw_device_do_unbind_bind(struct ccw_device *cdev) { int ret; + mutex_lock(&cdev->reg_mutex); if (device_is_registered(&cdev->dev)) { device_release_driver(&cdev->dev); ret = device_attach(&cdev->dev); WARN_ON(ret == -ENODEV); } + mutex_unlock(&cdev->reg_mutex); } static void @@ -687,6 +671,9 @@ ccw_device_release(struct device *dev) struct ccw_device *cdev; cdev = to_ccwdev(dev); + cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area, + sizeof(*cdev->private->dma_area)); + cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev); /* Release reference of parent subchannel. */ put_device(cdev->dev.parent); kfree(cdev->private); @@ -696,16 +683,48 @@ ccw_device_release(struct device *dev) static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) { struct ccw_device *cdev; + struct gen_pool *dma_pool; + int ret; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); - if (cdev) { - cdev->private = kzalloc(sizeof(struct ccw_device_private), - GFP_KERNEL | GFP_DMA); - if (cdev->private) - return cdev; + if (!cdev) { + ret = -ENOMEM; + goto err_cdev; + } + cdev->private = kzalloc(sizeof(struct ccw_device_private), + GFP_KERNEL | GFP_DMA); + if (!cdev->private) { + ret = -ENOMEM; + goto err_priv; } + + cdev->dev.dma_mask = sch->dev.dma_mask; + ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask); + if (ret) + goto err_coherent_mask; + + dma_pool = cio_gp_dma_create(&cdev->dev, 1); + if (!dma_pool) { + ret = -ENOMEM; + goto err_dma_pool; + } + cdev->private->dma_pool = dma_pool; + cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev, + sizeof(*cdev->private->dma_area)); + if (!cdev->private->dma_area) { + ret = -ENOMEM; + goto err_dma_area; + } + return cdev; +err_dma_area: + cio_gp_dma_destroy(dma_pool, &cdev->dev); +err_dma_pool: +err_coherent_mask: + kfree(cdev->private); +err_priv: kfree(cdev); - return ERR_PTR(-ENOMEM); +err_cdev: + return ERR_PTR(ret); } static void ccw_device_todo(struct work_struct *work); @@ -726,11 +745,13 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, INIT_LIST_HEAD(&priv->cmb_list); init_waitqueue_head(&priv->wait_q); timer_setup(&priv->timer, ccw_device_timeout, 0); + mutex_init(&cdev->reg_mutex); atomic_set(&priv->onoff, 0); - cdev->ccwlock = sch->lock; + cdev->ccwlock = &sch->lock; cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; + cdev->dev.bus = &ccw_bus_type; cdev->dev.groups = ccwdev_attr_groups; /* Do first half of device_register. 
*/ device_initialize(&cdev->dev); @@ -743,9 +764,9 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, goto out_put; } priv->flags.initialized = 1; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch_set_cdev(sch, cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; out_put: @@ -810,6 +831,7 @@ static void io_subchannel_register(struct ccw_device *cdev) * be registered). We need to reprobe since we may now have sense id * information. */ + mutex_lock(&cdev->reg_mutex); if (device_is_registered(&cdev->dev)) { if (!cdev->drv) { ret = device_reprobe(&cdev->dev); @@ -823,46 +845,29 @@ static void io_subchannel_register(struct ccw_device *cdev) adjust_init_count = 0; goto out; } - /* - * Now we know this subchannel will stay, we can throw - * our delayed uevent. - */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); /* make it known to the system */ - ret = ccw_device_add(cdev); + ret = device_add(&cdev->dev); if (ret) { CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); sch_set_cdev(sch, NULL); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); + mutex_unlock(&cdev->reg_mutex); /* Release initial device reference. */ put_device(&cdev->dev); goto out_err; } out: cdev->private->flags.recog_done = 1; + mutex_unlock(&cdev->reg_mutex); wake_up(&cdev->private->wait_q); out_err: if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); } -static void ccw_device_call_sch_unregister(struct ccw_device *cdev) -{ - struct subchannel *sch; - - /* Get subchannel reference for local processing. */ - if (!get_device(cdev->dev.parent)) - return; - sch = to_subchannel(cdev->dev.parent); - css_sch_device_unregister(sch); - /* Release subchannel reference for local processing. */ - put_device(&sch->dev); -} - /* * subchannel recognition done. Called from the state machine. */ @@ -884,7 +889,7 @@ io_subchannel_recog_done(struct ccw_device *cdev) wake_up(&ccw_device_init_wq); break; case DEV_STATE_OFFLINE: - /* + /* * We can't register the device in interrupt context so * we schedule a work item. */ @@ -899,9 +904,9 @@ static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) atomic_inc(&ccw_device_init_count); /* Start async. device sensing. */ - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); ccw_device_recognition(cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static int ccw_device_move_to_sch(struct ccw_device *cdev, @@ -916,12 +921,12 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, return -ENODEV; if (!sch_is_pseudo_sch(old_sch)) { - spin_lock_irq(old_sch->lock); + spin_lock_irq(&old_sch->lock); old_enabled = old_sch->schib.pmcw.ena; rc = 0; if (old_enabled) rc = cio_disable_subchannel(old_sch); - spin_unlock_irq(old_sch->lock); + spin_unlock_irq(&old_sch->lock); if (rc == -EBUSY) { /* Release child reference for new parent. */ put_device(&sch->dev); @@ -938,10 +943,10 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, cdev->private->dev_id.devno, sch->schid.ssid, sch->schib.pmcw.dev, rc); if (old_enabled) { - /* Try to reenable the old subchannel. */ - spin_lock_irq(old_sch->lock); - cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch); - spin_unlock_irq(old_sch->lock); + /* Try to re-enable the old subchannel. 
*/ + spin_lock_irq(&old_sch->lock); + cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch)); + spin_unlock_irq(&old_sch->lock); } /* Release child reference for new parent. */ put_device(&sch->dev); @@ -949,19 +954,19 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, } /* Clean up old subchannel. */ if (!sch_is_pseudo_sch(old_sch)) { - spin_lock_irq(old_sch->lock); + spin_lock_irq(&old_sch->lock); sch_set_cdev(old_sch, NULL); - spin_unlock_irq(old_sch->lock); + spin_unlock_irq(&old_sch->lock); css_schedule_eval(old_sch->schid); } /* Release child reference for old parent. */ put_device(&old_sch->dev); /* Initialize new subchannel. */ - spin_lock_irq(sch->lock); - cdev->ccwlock = sch->lock; + spin_lock_irq(&sch->lock); + cdev->ccwlock = &sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); if (!sch_is_pseudo_sch(sch)) css_update_ssd_info(sch); return 0; @@ -1032,14 +1037,11 @@ static int io_subchannel_probe(struct subchannel *sch) "0.%x.%04x (rc=%d)\n", sch->schid.ssid, sch->schid.sch_no, rc); /* - * The console subchannel already has an associated ccw_device. - * Throw the delayed uevent for the subchannel, register - * the ccw_device and exit. - */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + * The console subchannel already has an associated ccw_device. + * Register it and exit. + */ cdev = sch_get_cdev(sch); - rc = ccw_device_add(cdev); + rc = device_add(&cdev->dev); if (rc) { /* Release online reference. */ put_device(&cdev->dev); @@ -1062,18 +1064,26 @@ static int io_subchannel_probe(struct subchannel *sch) if (!io_priv) goto out_schedule; + io_priv->dma_area = dma_alloc_coherent(&sch->dev, + sizeof(*io_priv->dma_area), + &io_priv->dma_area_dma, GFP_KERNEL); + if (!io_priv->dma_area) { + kfree(io_priv); + goto out_schedule; + } + set_io_private(sch, io_priv); css_schedule_eval(sch->schid); return 0; out_schedule: - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); css_sched_sch_todo(sch, SCH_TODO_UNREG); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); return 0; } -static int io_subchannel_remove(struct subchannel *sch) +static void io_subchannel_remove(struct subchannel *sch) { struct io_subchannel_private *io_priv = to_io_private(sch); struct ccw_device *cdev; @@ -1083,14 +1093,15 @@ static int io_subchannel_remove(struct subchannel *sch) goto out_free; ccw_device_unregister(cdev); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); sch_set_cdev(sch, NULL); set_io_private(sch, NULL); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); out_free: + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), + io_priv->dma_area, io_priv->dma_area_dma); kfree(io_priv); sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); - return 0; } static void io_subchannel_verify(struct subchannel *sch) @@ -1100,6 +1111,8 @@ static void io_subchannel_verify(struct subchannel *sch) cdev = sch_get_cdev(sch); if (cdev) dev_fsm_event(cdev, DEV_EVENT_VERIFY); + else + css_schedule_eval(sch->schid); } static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) @@ -1133,7 +1146,8 @@ static int io_subchannel_chp_event(struct subchannel *sch, struct chp_link *link, int event) { struct ccw_device *cdev = sch_get_cdev(sch); - int mask; + int mask, chpid, valid_bit; + int path_event[8]; mask = chp_ssd_get_mask(&sch->ssd_info, link); if (!mask) @@ -1168,6 +1182,18 @@ static int io_subchannel_chp_event(struct subchannel *sch, 
cdev->private->path_new_mask |= mask; io_subchannel_verify(sch); break; + case CHP_FCES_EVENT: + /* Forward Endpoint Security event */ + for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++, + valid_bit >>= 1) { + if (mask & valid_bit) + path_event[chpid] = PE_PATH_FCES_EVENT; + else + path_event[chpid] = PE_NONE; + } + if (cdev && cdev->drv && cdev->drv->path_event) + cdev->drv->path_event(cdev, path_event); + break; } return 0; } @@ -1177,7 +1203,7 @@ static void io_subchannel_quiesce(struct subchannel *sch) struct ccw_device *cdev; int ret; - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); cdev = sch_get_cdev(sch); if (cio_is_console(sch->schid)) goto out_unlock; @@ -1194,15 +1220,15 @@ static void io_subchannel_quiesce(struct subchannel *sch) ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, HZ/10); - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); wait_event(cdev->private->wait_q, cdev->private->state != DEV_STATE_QUIESCE); - spin_lock_irq(sch->lock); + spin_lock_irq(&sch->lock); } ret = cio_disable_subchannel(sch); } out_unlock: - spin_unlock_irq(sch->lock); + spin_unlock_irq(&sch->lock); } static void io_subchannel_shutdown(struct subchannel *sch) @@ -1230,7 +1256,7 @@ static int recovery_check(struct device *dev, void *data) sch = to_subchannel(cdev->dev.parent); if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) break; - /* fall through */ + fallthrough; case DEV_STATE_DISCONNECTED: CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, @@ -1289,21 +1315,34 @@ void ccw_device_schedule_recovery(void) spin_unlock_irqrestore(&recovery_lock, flags); } -static int purge_fn(struct device *dev, void *data) +static int purge_fn(struct subchannel *sch, void *data) { - struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_dev_id *id = &cdev->private->dev_id; + struct ccw_device *cdev; - spin_lock_irq(cdev->ccwlock); - if (is_blacklisted(id->ssid, id->devno) && - (cdev->private->state == DEV_STATE_OFFLINE) && - (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { - CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, - id->devno); + spin_lock_irq(&sch->lock); + if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv) + goto unlock; + + if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) + goto unlock; + + cdev = sch_get_cdev(sch); + if (cdev) { + if (cdev->private->state != DEV_STATE_OFFLINE) + goto unlock; + + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) + goto unlock; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); atomic_set(&cdev->private->onoff, 0); } - spin_unlock_irq(cdev->ccwlock); + + css_sched_sch_todo(sch, SCH_TODO_UNREG); + CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid, + sch->schib.pmcw.dev, cdev ? "" : " (no cdev)"); + +unlock: + spin_unlock_irq(&sch->lock); /* Abort loop in case of pending signal. 
*/ if (signal_pending(current)) return -EINTR; @@ -1319,7 +1358,7 @@ static int purge_fn(struct device *dev, void *data) int ccw_purge_blacklisted(void) { CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); - bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); + for_each_subchannel_staged(purge_fn, NULL, NULL); return 0; } @@ -1348,6 +1387,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev) enum io_sch_action { IO_SCH_UNREG, IO_SCH_ORPH_UNREG, + IO_SCH_UNREG_CDEV, IO_SCH_ATTACH, IO_SCH_UNREG_ATTACH, IO_SCH_ORPH_ATTACH, @@ -1355,14 +1395,18 @@ enum io_sch_action { IO_SCH_VERIFY, IO_SCH_DISC, IO_SCH_NOP, + IO_SCH_ORPH_CDEV, }; static enum io_sch_action sch_get_action(struct subchannel *sch) { struct ccw_device *cdev; + int rc; cdev = sch_get_cdev(sch); - if (cio_update_schib(sch)) { + rc = cio_update_schib(sch); + + if (rc == -ENODEV) { /* Not operational. */ if (!cdev) return IO_SCH_UNREG; @@ -1370,6 +1414,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } + + /* Avoid unregistering subchannels without working device. */ + if (rc == -EACCES) { + if (!cdev) + return IO_SCH_NOP; + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + return IO_SCH_UNREG_CDEV; + return IO_SCH_ORPH_CDEV; + } + /* Operational. */ if (!cdev) return IO_SCH_ATTACH; @@ -1380,12 +1434,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) } if ((sch->schib.pmcw.pam & sch->opm) == 0) { if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) - return IO_SCH_UNREG; + return IO_SCH_UNREG_CDEV; return IO_SCH_DISC; } if (device_is_disconnected(cdev)) return IO_SCH_REPROBE; - if (cdev->online && !cdev->private->flags.resuming) + if (cdev->online) return IO_SCH_VERIFY; if (cdev->private->state == DEV_STATE_NOT_OPER) return IO_SCH_UNREG_ATTACH; @@ -1410,7 +1464,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) enum io_sch_action action; int rc = -EAGAIN; - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; if (work_pending(&sch->todo_work)) @@ -1439,9 +1493,11 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: ccw_device_set_disconnected(cdev); break; + case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: if (!cdev) @@ -1462,28 +1518,25 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) default: break; } - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* All other actions require process context. */ if (!process) goto out; /* Handle attached ccw device. */ switch (action) { case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: /* Move ccw device to orphanage. */ rc = ccw_device_move_to_orph(cdev); if (rc) goto out; break; + case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: - spin_lock_irqsave(sch->lock, flags); - if (cdev->private->flags.resuming) { - /* Device will be handled later. */ - rc = 0; - goto out_unlock; - } + spin_lock_irqsave(&sch->lock, flags); sch_set_cdev(sch, NULL); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* Unregister ccw device. 
*/ ccw_device_unregister(cdev); break; @@ -1494,8 +1547,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_UNREG: - if (!cdev || !cdev->private->flags.resuming) - css_sch_device_unregister(sch); + css_sch_device_unregister(sch); break; case IO_SCH_ORPH_ATTACH: case IO_SCH_UNREG_ATTACH: @@ -1513,9 +1565,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) put_device(&cdev->dev); goto out; } - spin_lock_irqsave(sch->lock, flags); + spin_lock_irqsave(&sch->lock, flags); ccw_device_trigger_reprobe(cdev); - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); /* Release reference from get_ccwdev_by_dev_id() */ put_device(&cdev->dev); break; @@ -1525,7 +1577,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) return 0; out_unlock: - spin_unlock_irqrestore(sch->lock, flags); + spin_unlock_irqrestore(&sch->lock, flags); out: return rc; } @@ -1593,13 +1645,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) return ERR_CAST(sch); io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); - if (!io_priv) { - put_device(&sch->dev); - return ERR_PTR(-ENOMEM); - } + if (!io_priv) + goto err_priv; + io_priv->dma_area = dma_alloc_coherent(&sch->dev, + sizeof(*io_priv->dma_area), + &io_priv->dma_area_dma, GFP_KERNEL); + if (!io_priv->dma_area) + goto err_dma_area; set_io_private(sch, io_priv); cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), + io_priv->dma_area, io_priv->dma_area_dma); + set_io_private(sch, NULL); put_device(&sch->dev); kfree(io_priv); return cdev; @@ -1607,6 +1665,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) cdev->drv = drv; ccw_device_set_int_class(cdev); return cdev; + +err_dma_area: + kfree(io_priv); +err_priv: + put_device(&sch->dev); + return ERR_PTR(-ENOMEM); } void __init ccw_device_destroy_console(struct ccw_device *cdev) @@ -1615,6 +1679,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev) struct io_subchannel_private *io_priv = to_io_private(sch); set_io_private(sch, NULL); + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), + io_priv->dma_area, io_priv->dma_area_dma); put_device(&sch->dev); put_device(&cdev->dev); kfree(io_priv); @@ -1636,33 +1702,11 @@ void ccw_device_wait_idle(struct ccw_device *cdev) cio_tsch(sch); if (sch->schib.scsw.cmd.actl == 0) break; - udelay_simple(100); + udelay(100); } } - -static int ccw_device_pm_restore(struct device *dev); - -int ccw_device_force_console(struct ccw_device *cdev) -{ - return ccw_device_pm_restore(&cdev->dev); -} -EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif -/* - * get ccw_device matching the busid, but only if owned by cdrv - */ -static int -__ccwdev_check_busid(struct device *dev, void *id) -{ - char *bus_id; - - bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - - /** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by @@ -1679,8 +1723,7 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, { struct device *dev; - dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, - __ccwdev_check_busid); + dev = driver_find_device_by_name(&cdrv->driver, bus_id); return dev ? 
to_ccwdev(dev) : NULL; } @@ -1714,7 +1757,7 @@ ccw_device_probe (struct device *dev) return 0; } -static int ccw_device_remove(struct device *dev) +static void ccw_device_remove(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; @@ -1748,8 +1791,6 @@ static int ccw_device_remove(struct device *dev) spin_unlock_irq(cdev->ccwlock); io_subchannel_quiesce(sch); __disable_cmf(cdev); - - return 0; } static void ccw_device_shutdown(struct device *dev) @@ -1762,243 +1803,13 @@ static void ccw_device_shutdown(struct device *dev) __disable_cmf(cdev); } -static int ccw_device_pm_prepare(struct device *dev) -{ - struct ccw_device *cdev = to_ccwdev(dev); - - if (work_pending(&cdev->private->todo_work)) - return -EAGAIN; - /* Fail while device is being set online/offline. */ - if (atomic_read(&cdev->private->onoff)) - return -EAGAIN; - - if (cdev->online && cdev->drv && cdev->drv->prepare) - return cdev->drv->prepare(cdev); - - return 0; -} - -static void ccw_device_pm_complete(struct device *dev) -{ - struct ccw_device *cdev = to_ccwdev(dev); - - if (cdev->online && cdev->drv && cdev->drv->complete) - cdev->drv->complete(cdev); -} - -static int ccw_device_pm_freeze(struct device *dev) -{ - struct ccw_device *cdev = to_ccwdev(dev); - struct subchannel *sch = to_subchannel(cdev->dev.parent); - int ret, cm_enabled; - - /* Fail suspend while device is in transistional state. */ - if (!dev_fsm_final_state(cdev)) - return -EAGAIN; - if (!cdev->online) - return 0; - if (cdev->drv && cdev->drv->freeze) { - ret = cdev->drv->freeze(cdev); - if (ret) - return ret; - } - - spin_lock_irq(sch->lock); - cm_enabled = cdev->private->cmb != NULL; - spin_unlock_irq(sch->lock); - if (cm_enabled) { - /* Don't have the css write on memory. */ - ret = ccw_set_cmf(cdev, 0); - if (ret) - return ret; - } - /* From here on, disallow device driver I/O. */ - spin_lock_irq(sch->lock); - ret = cio_disable_subchannel(sch); - spin_unlock_irq(sch->lock); - - return ret; -} - -static int ccw_device_pm_thaw(struct device *dev) -{ - struct ccw_device *cdev = to_ccwdev(dev); - struct subchannel *sch = to_subchannel(cdev->dev.parent); - int ret, cm_enabled; - - if (!cdev->online) - return 0; - - spin_lock_irq(sch->lock); - /* Allow device driver I/O again. */ - ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); - cm_enabled = cdev->private->cmb != NULL; - spin_unlock_irq(sch->lock); - if (ret) - return ret; - - if (cm_enabled) { - ret = ccw_set_cmf(cdev, 1); - if (ret) - return ret; - } - - if (cdev->drv && cdev->drv->thaw) - ret = cdev->drv->thaw(cdev); - - return ret; -} - -static void __ccw_device_pm_restore(struct ccw_device *cdev) -{ - struct subchannel *sch = to_subchannel(cdev->dev.parent); - - spin_lock_irq(sch->lock); - if (cio_is_console(sch->schid)) { - cio_enable_subchannel(sch, (u32)(addr_t)sch); - goto out_unlock; - } - /* - * While we were sleeping, devices may have gone or become - * available again. Kick re-detection. - */ - cdev->private->flags.resuming = 1; - cdev->private->path_new_mask = LPM_ANYPATH; - css_sched_sch_todo(sch, SCH_TODO_EVAL); - spin_unlock_irq(sch->lock); - css_wait_for_slow_path(); - - /* cdev may have been moved to a different subchannel. 
*/ - sch = to_subchannel(cdev->dev.parent); - spin_lock_irq(sch->lock); - if (cdev->private->state != DEV_STATE_ONLINE && - cdev->private->state != DEV_STATE_OFFLINE) - goto out_unlock; - - ccw_device_recognition(cdev); - spin_unlock_irq(sch->lock); - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || - cdev->private->state == DEV_STATE_DISCONNECTED); - spin_lock_irq(sch->lock); - -out_unlock: - cdev->private->flags.resuming = 0; - spin_unlock_irq(sch->lock); -} - -static int resume_handle_boxed(struct ccw_device *cdev) -{ - cdev->private->state = DEV_STATE_BOXED; - if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) - return 0; - ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - return -ENODEV; -} - -static int resume_handle_disc(struct ccw_device *cdev) -{ - cdev->private->state = DEV_STATE_DISCONNECTED; - if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) - return 0; - ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - return -ENODEV; -} - -static int ccw_device_pm_restore(struct device *dev) -{ - struct ccw_device *cdev = to_ccwdev(dev); - struct subchannel *sch; - int ret = 0; - - __ccw_device_pm_restore(cdev); - sch = to_subchannel(cdev->dev.parent); - spin_lock_irq(sch->lock); - if (cio_is_console(sch->schid)) - goto out_restore; - - /* check recognition results */ - switch (cdev->private->state) { - case DEV_STATE_OFFLINE: - case DEV_STATE_ONLINE: - cdev->private->flags.donotify = 0; - break; - case DEV_STATE_BOXED: - ret = resume_handle_boxed(cdev); - if (ret) - goto out_unlock; - goto out_restore; - default: - ret = resume_handle_disc(cdev); - if (ret) - goto out_unlock; - goto out_restore; - } - /* check if the device type has changed */ - if (!ccw_device_test_sense_data(cdev)) { - ccw_device_update_sense_data(cdev); - ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); - ret = -ENODEV; - goto out_unlock; - } - if (!cdev->online) - goto out_unlock; - - if (ccw_device_online(cdev)) { - ret = resume_handle_disc(cdev); - if (ret) - goto out_unlock; - goto out_restore; - } - spin_unlock_irq(sch->lock); - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - spin_lock_irq(sch->lock); - - if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { - ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - ret = -ENODEV; - goto out_unlock; - } - - /* reenable cmf, if needed */ - if (cdev->private->cmb) { - spin_unlock_irq(sch->lock); - ret = ccw_set_cmf(cdev, 1); - spin_lock_irq(sch->lock); - if (ret) { - CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " - "(rc=%d)\n", cdev->private->dev_id.ssid, - cdev->private->dev_id.devno, ret); - ret = 0; - } - } - -out_restore: - spin_unlock_irq(sch->lock); - if (cdev->online && cdev->drv && cdev->drv->restore) - ret = cdev->drv->restore(cdev); - return ret; - -out_unlock: - spin_unlock_irq(sch->lock); - return ret; -} - -static const struct dev_pm_ops ccw_pm_ops = { - .prepare = ccw_device_pm_prepare, - .complete = ccw_device_pm_complete, - .freeze = ccw_device_pm_freeze, - .thaw = ccw_device_pm_thaw, - .restore = ccw_device_pm_restore, -}; - -static struct bus_type ccw_bus_type = { +static const struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, .uevent = ccw_uevent, .probe = ccw_device_probe, .remove = ccw_device_remove, .shutdown = ccw_device_shutdown, - .pm = &ccw_pm_ops, }; /** @@ -2060,12 +1871,12 @@ static void ccw_device_todo(struct work_struct *work) case CDEV_TODO_UNREG_EVAL: if (!sch_is_pseudo_sch(sch)) css_schedule_eval(sch->schid); - /* fall-through */ + fallthrough; case CDEV_TODO_UNREG: - if 
(sch_is_pseudo_sch(sch)) - ccw_device_unregister(cdev); - else - ccw_device_call_sch_unregister(cdev); + spin_lock_irq(&sch->lock); + sch_set_cdev(sch, NULL); + spin_unlock_irq(&sch->lock); + ccw_device_unregister(cdev); break; default: break; |
