author	Farhan Ali <alifm@linux.ibm.com>	2020-05-05 14:27:44 +0200
committer	Cornelia Huck <cohuck@redhat.com>	2020-06-03 11:28:19 +0200
commit	3f02cb2fd9d2d9e8762102886e3e4b51285797ee (patch)
tree	e07f6a751a1651d02a8415e8c653fb035039abef /drivers/s390/cio/vfio_ccw_drv.c
parent	d8cac29b1d52204e6632d2887eff766acd02b9aa (diff)
vfio-ccw: Wire up the CRW irq and CRW region
Use the IRQ to notify userspace that there is a CRW pending in the region, related to path-availability changes on the passthrough subchannel.

Signed-off-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <20200505122745.53208-8-farman@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Diffstat (limited to 'drivers/s390/cio/vfio_ccw_drv.c')
-rw-r--r--	drivers/s390/cio/vfio_ccw_drv.c	49
1 file changed, 49 insertions(+), 0 deletions(-)
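For context on how this notification is meant to be consumed: the commit message above says the CRW irq tells userspace that a CRW is pending in the CRW region. Below is a minimal, hypothetical userspace sketch of that flow; it is not part of this patch. It assumes the irq index name VFIO_CCW_CRW_IRQ_INDEX and struct ccw_crw_region from the uapi headers introduced earlier in this series, and that the caller has already located the CRW region's file offset via VFIO_DEVICE_GET_REGION_INFO.

/*
 * Hypothetical sketch, not from this patch: wire an eventfd to the
 * vfio-ccw CRW irq, then read the CRW region whenever it fires.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>

static int watch_crw(int device_fd, off_t crw_region_offset)
{
	struct ccw_crw_region region;
	struct vfio_irq_set *irq_set;
	char buf[sizeof(*irq_set) + sizeof(int32_t)];
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	/* Register the eventfd as the trigger for the CRW irq. */
	irq_set = (struct vfio_irq_set *)buf;
	irq_set->argsz = sizeof(buf);
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_CCW_CRW_IRQ_INDEX;
	irq_set->start = 0;
	irq_set->count = 1;
	memcpy(irq_set->data, &efd, sizeof(int32_t));
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set) < 0)
		return -1;

	for (;;) {
		struct pollfd pfd = { .fd = efd, .events = POLLIN };
		uint64_t count;

		/* Wait for the kernel's crw_work to signal the eventfd. */
		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (read(efd, &count, sizeof(count)) != sizeof(count))
			continue;

		/* Fetch the pending CRW from the CRW region. */
		if (pread(device_fd, &region, sizeof(region),
			  crw_region_offset) != sizeof(region))
			return -1;
		printf("CRW: 0x%x\n", region.crw);
	}
}

Each eventfd signal prompts one read of the region; the kernel side queues the CRW and signals the eventfd from the crw_work worker, as shown in the diff below.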
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index e4deae6fd525..9144360851ed 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -108,6 +108,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 		eventfd_signal(private->io_trigger, 1);
 }
 
+static void vfio_ccw_crw_todo(struct work_struct *work)
+{
+	struct vfio_ccw_private *private;
+
+	private = container_of(work, struct vfio_ccw_private, crw_work);
+
+	if (!list_empty(&private->crw) && private->crw_trigger)
+		eventfd_signal(private->crw_trigger, 1);
+}
+
 /*
  * Css driver callbacks
  */
@@ -186,7 +196,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 	if (ret)
 		goto out_free;
 
+	INIT_LIST_HEAD(&private->crw);
 	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
 	atomic_set(&private->avail, 1);
 	private->state = VFIO_CCW_STATE_STANDBY;
 
@@ -217,9 +229,15 @@ out_free:
 static int vfio_ccw_sch_remove(struct subchannel *sch)
 {
 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+	struct vfio_ccw_crw *crw, *temp;
 
 	vfio_ccw_sch_quiesce(sch);
 
+	list_for_each_entry_safe(crw, temp, &private->crw, next) {
+		list_del(&crw->next);
+		kfree(crw);
+	}
+
 	vfio_ccw_mdev_unreg(sch);
 
 	dev_set_drvdata(&sch->dev, NULL);
@@ -281,6 +299,33 @@ out_unlock:
 	return rc;
 }
 
+static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
+			       unsigned int rsc,
+			       unsigned int erc,
+			       unsigned int rsid)
+{
+	struct vfio_ccw_crw *crw;
+
+	/*
+	 * If unable to allocate a CRW, just drop the event and
+	 * carry on.  The guest will either see a later one or
+	 * learn when it issues its own store subchannel.
+	 */
+	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
+	if (!crw)
+		return;
+
+	/*
+	 * Build the CRW based on the inputs given to us.
+	 */
+	crw->crw.rsc = rsc;
+	crw->crw.erc = erc;
+	crw->crw.rsid = rsid;
+
+	list_add_tail(&crw->next, &private->crw);
+	queue_work(vfio_ccw_work_q, &private->crw_work);
+}
+
 static int vfio_ccw_chp_event(struct subchannel *sch,
 			      struct chp_link *link, int event)
 {
@@ -311,6 +356,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
 		/* Path is gone */
 		if (sch->schib.pmcw.lpum & mask)
 			cio_cancel_halt_clear(sch, &retry);
+		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
+				   link->chpid.id);
 		break;
 	case CHP_VARY_ON:
 		/* Path logically turned on */
@@ -320,6 +367,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
 	case CHP_ONLINE:
 		/* Path became available */
 		sch->lpm |= mask & sch->opm;
+		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
+				   link->chpid.id);
 		break;
 	}
 