Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r-- | drivers/s390/cio/qdio_main.c | 1489
1 file changed, 564 insertions(+), 925 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index fb1c1e0483ed..7dd967165025 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Linux for s390 qdio support, buffer handling, qdio API and module support. * @@ -6,16 +7,19 @@ * Jan Glauber <jang@linux.vnet.ibm.com> * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> */ + +#include <linux/export.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> -#include <linux/timer.h> +#include <linux/kmemleak.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/io.h> #include <linux/atomic.h> #include <asm/debug.h> #include <asm/qdio.h> +#include <asm/asm.h> #include <asm/ipl.h> #include "cio.h" @@ -30,39 +34,40 @@ MODULE_DESCRIPTION("QDIO base support"); MODULE_LICENSE("GPL"); static inline int do_siga_sync(unsigned long schid, - unsigned int out_mask, unsigned int in_mask, + unsigned long out_mask, unsigned long in_mask, unsigned int fc) { - register unsigned long __fc asm ("0") = fc; - register unsigned long __schid asm ("1") = schid; - register unsigned long out asm ("2") = out_mask; - register unsigned long in asm ("3") = in_mask; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[out]\n" + " lgr 3,%[in]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); - return cc; + CC_IPM(cc) + : CC_OUT(cc, cc) + : [fc] "d" (fc), [schid] "d" (schid), + [out] "d" (out_mask), [in] "d" (in_mask) + : CC_CLOBBER_LIST("0", "1", "2", "3")); + return CC_TRANSFORM(cc); } -static inline int do_siga_input(unsigned long schid, unsigned int mask, - unsigned int fc) +static inline int do_siga_input(unsigned long schid, unsigned long mask, + unsigned long fc) { - register unsigned long __fc asm ("0") = fc; - register unsigned long __schid asm ("1") = schid; - register unsigned long __mask asm ("2") = mask; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[mask]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (__fc), "d" (__schid), "d" (__mask) : "cc"); - return cc; + CC_IPM(cc) + : CC_OUT(cc, cc) + : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask) + : CC_CLOBBER_LIST("0", "1", "2")); + return CC_TRANSFORM(cc); } /** @@ -71,45 +76,30 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask, * @mask: which output queues to process * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer * @fc: function code to perform + * @aob: asynchronous operation block * * Returns condition code. * Note: For IQDC unicast queues only the highest priority queue is processed. 
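The converted do_siga_sync()/do_siga_input() drop the fragile register-asm variables in favour of explicit lgr loads into r0-r3 plus the CC_* helpers from <asm/asm.h>. As a reference point, here is roughly what those helpers reduce to; an illustrative approximation, not the verbatim kernel definitions:

	#ifdef __GCC_ASM_FLAG_OUTPUTS__			/* compiler hands back cc directly */
	#define CC_IPM(sym)				/* nothing to extract */
	#define CC_OUT(name, var)	"=@cc" (var)
	#define CC_TRANSFORM(cc)	(cc)		/* already 0..3 */
	#define CC_CLOBBER_LIST(...)	__VA_ARGS__	/* cc is an output, not clobbered */
	#else						/* extract cc via IPM */
	#define CC_IPM(sym)		"	ipm	%[" #sym "]\n"
	#define CC_OUT(name, var)	[name] "=d" (var)
	#define CC_TRANSFORM(cc)	((cc) >> 28)	/* IPM leaves cc in bits 2-3 */
	#define CC_CLOBBER_LIST(...)	"cc", __VA_ARGS__
	#endif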
*/ static inline int do_siga_output(unsigned long schid, unsigned long mask, - unsigned int *bb, unsigned int fc, - unsigned long aob) + unsigned int *bb, unsigned long fc, + dma64_t aob) { - register unsigned long __fc asm("0") = fc; - register unsigned long __schid asm("1") = schid; - register unsigned long __mask asm("2") = mask; - register unsigned long __aob asm("3") = aob; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[mask]\n" + " lgr 3,%[aob]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc), "+d" (__fc), "+d" (__aob) - : "d" (__schid), "d" (__mask) - : "cc"); - *bb = __fc >> 31; - return cc; -} - -static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) -{ - /* all done or next buffer state different */ - if (ccq == 0 || ccq == 32) - return 0; - /* no buffer processed */ - if (ccq == 97) - return 1; - /* not all buffers processed */ - if (ccq == 96) - return 2; - /* notify devices immediately */ - DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); - return -EIO; + " lgr %[fc],0\n" + CC_IPM(cc) + : CC_OUT(cc, cc), [fc] "+&d" (fc) + : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob) + : CC_CLOBBER_LIST("0", "1", "2", "3")); + *bb = fc >> 31; + return CC_TRANSFORM(cc); } /** @@ -126,7 +116,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { - int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; + int tmp_count = count, tmp_start = start, nr = q->nr; unsigned int ccq = 0; qperf_inc(q, eqbs); @@ -136,34 +126,30 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, again: ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, auto_ack); - rc = qdio_check_ccq(q, ccq); - if (!rc) - return count - tmp_count; - - if (rc == 1) { - DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); - goto again; - } - if (rc == 2) { + switch (ccq) { + case 0: + case 32: + /* all done, or next buffer state different */ + return count - tmp_count; + case 96: + /* not all buffers processed */ qperf_inc(q, eqbs_partial); - DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", tmp_count); - /* - * Retry once, if that fails bail out and process the - * extracted buffers before trying again. 
- */ - if (!retried++) - goto again; - else - return count - tmp_count; + return count - tmp_count; + case 97: + /* no buffer processed */ + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); + goto again; + default: + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, + q->first_to_check, count, q->irq_ptr->int_parm); + return 0; } - - DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, - q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); - return 0; } /** @@ -183,58 +169,65 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, unsigned int ccq = 0; int tmp_count = count, tmp_start = start; int nr = q->nr; - int rc; - if (!count) - return 0; qperf_inc(q, sqbs); if (!q->is_input_q) nr += q->irq_ptr->nr_input_qs; again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); - rc = qdio_check_ccq(q, ccq); - if (!rc) { + + switch (ccq) { + case 0: + case 32: + /* all done, or active buffer adapter-owned */ WARN_ON_ONCE(tmp_count); return count - tmp_count; - } - - if (rc == 1 || rc == 2) { + case 96: + /* not all buffers processed */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); qperf_inc(q, sqbs_partial); goto again; + default: + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, + q->first_to_check, count, q->irq_ptr->int_parm); + return 0; } - - DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, - q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); - return 0; } -/* returns number of examined buffers and their common state in *state */ +/* + * Returns number of examined buffers and their common state in *state. + * Requested number of buffers-to-examine must be > 0. 
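For the non-QEBSM path, the rewritten get_buf_states() reads the first SLSB state once, bails out early when the buffer is adapter-owned, and then counts how far that state extends around the ring. A stand-alone model of the scan (the adapter-owned early-out is omitted for brevity), assuming the usual 128-entry ring:

	#define QDIO_MAX_BUFFERS_PER_Q	128
	#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
	#define next_buf(bufnr)		(((bufnr) + 1) & QDIO_MAX_BUFFERS_MASK)

	/* Returns how many consecutive buffers share the state of slsb[bufnr];
	 * count must be > 0, matching the comment above. */
	static int scan_frontier(const unsigned char *slsb, unsigned int bufnr,
				 unsigned char *state, unsigned int count)
	{
		unsigned char first = slsb[bufnr];
		unsigned int i;

		for (i = 1; i < count; i++) {
			bufnr = next_buf(bufnr);
			if (slsb[bufnr] != first)	/* frontier reached */
				break;
		}
		*state = first;
		return i;
	}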
+ */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, - int auto_ack, int merge_pending) + int auto_ack) { unsigned char __state = 0; - int i; + int i = 1; if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); - for (i = 0; i < count; i++) { - if (!__state) { - __state = q->slsb.val[bufnr]; - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) - __state = SLSB_P_OUTPUT_EMPTY; - } else if (merge_pending) { - if ((q->slsb.val[bufnr] & __state) != __state) - break; - } else if (q->slsb.val[bufnr] != __state) - break; + /* get initial state: */ + __state = q->slsb.val[bufnr]; + + /* Bail out early if there is no work on the queue: */ + if (__state & SLSB_OWNER_CU) + goto out; + + for (; i < count; i++) { bufnr = next_buf(bufnr); + + /* stop if next state differs from initial state: */ + if (q->slsb.val[bufnr] != __state) + break; } + +out: *state = __state; return i; } @@ -242,7 +235,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack) { - return get_buf_states(q, bufnr, state, 1, auto_ack, 0); + return get_buf_states(q, bufnr, state, 1, auto_ack); } /* wrap-around safe setting of slsb states, returns number of changed buffers */ @@ -254,10 +247,17 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr, if (is_qebsm(q)) return qdio_do_sqbs(q, state, bufnr, count); + /* Ensure that all preceding changes to the SBALs are visible: */ + mb(); + for (i = 0; i < count; i++) { - xchg(&q->slsb.val[bufnr], state); + WRITE_ONCE(q->slsb.val[bufnr], state); bufnr = next_buf(bufnr); } + + /* Make our SLSB changes visible: */ + mb(); + return count; } @@ -302,26 +302,37 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, return (cc) ? 
-EIO : 0; } +static inline int qdio_sync_input_queue(struct qdio_q *q) +{ + return qdio_siga_sync(q, 0, q->mask); +} + +static inline int qdio_sync_output_queue(struct qdio_q *q) +{ + return qdio_siga_sync(q, q->mask, 0); +} + static inline int qdio_siga_sync_q(struct qdio_q *q) { if (q->is_input_q) - return qdio_siga_sync(q, 0, q->mask); + return qdio_sync_input_queue(q); else - return qdio_siga_sync(q, q->mask, 0); + return qdio_sync_output_queue(q); } -static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, - unsigned long aob) +static int qdio_siga_output(struct qdio_q *q, unsigned int count, + unsigned int *busy_bit, dma64_t aob) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; int retries = 0, cc; - unsigned long laob = 0; - if (q->u.out.use_cq && aob != 0) { - fc = QDIO_SIGA_WRITEQ; - laob = aob; + if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) { + if (count > 1) + fc = QDIO_SIGA_WRITEM; + else if (aob) + fc = QDIO_SIGA_WRITEQ; } if (is_qebsm(q)) { @@ -329,19 +340,17 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, fc |= QDIO_SIGA_QEBSM_FLAG; } again: - WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) || - (aob && fc != QDIO_SIGA_WRITEQ)); - cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); + cc = do_siga_output(schid, q->mask, busy_bit, fc, aob); /* hipersocket busy condition */ if (unlikely(*busy_bit)) { retries++; if (!start_time) { - start_time = get_tod_clock(); + start_time = get_tod_clock_fast(); goto again; } - if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) + if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) goto again; } if (retries) { @@ -372,454 +381,266 @@ static inline int qdio_siga_input(struct qdio_q *q) return (cc) ? -EIO : 0; } -#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0) -#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U) - -static inline void qdio_sync_queues(struct qdio_q *q) -{ - /* PCI capable outbound queues will also be scanned so sync them too */ - if (pci_out_supported(q)) - qdio_siga_sync_all(q); - else - qdio_siga_sync_q(q); -} - int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state) { - if (need_siga_sync(q)) + if (qdio_need_siga_sync(q->irq_ptr)) qdio_siga_sync_q(q); - return get_buf_states(q, bufnr, state, 1, 0, 0); + return get_buf_state(q, bufnr, state, 0); } static inline void qdio_stop_polling(struct qdio_q *q) { - if (!q->u.in.polling) + if (!q->u.in.batch_count) return; - q->u.in.polling = 0; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ - if (is_qebsm(q)) { - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = 0; - } else - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.batch_count); + q->u.in.batch_count = 0; } -static inline void account_sbals(struct qdio_q *q, int count) +static inline void account_sbals(struct qdio_q *q, unsigned int count) { - int pos = 0; - q->q_stats.nr_sbal_total += count; - if (count == QDIO_MAX_BUFFERS_MASK) { - q->q_stats.nr_sbals[7]++; - return; - } - while (count >>= 1) - pos++; - q->q_stats.nr_sbals[pos]++; + q->q_stats.nr_sbals[ilog2(count)]++; } -static void process_buffer_error(struct qdio_q *q, int count) +static void process_buffer_error(struct qdio_q *q, unsigned int start, + int count) { - unsigned char state = (q->is_input_q) ? 
SLSB_P_INPUT_NOT_INIT : - SLSB_P_OUTPUT_NOT_INIT; - - q->qdio_error = QDIO_ERROR_SLSB_STATE; - /* special handling for no target buffer empty */ - if ((!q->is_input_q && - (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) { + if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && + q->sbal[start]->element[15].sflags == 0x10) { qperf_inc(q, target_full); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", - q->first_to_check); - goto set; + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); + return; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); - DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); + DBF_ERROR("FTC:%3d C:%3d", start, count); DBF_ERROR("F14:%2x F15:%2x", - q->sbal[q->first_to_check]->element[14].sflags, - q->sbal[q->first_to_check]->element[15].sflags); - -set: - /* - * Interrupts may be avoided as long as the error is present - * so change the buffer state immediately to avoid starvation. - */ - set_buf_states(q, q->first_to_check, state, count); + q->sbal[start]->element[14].sflags, + q->sbal[start]->element[15].sflags); } -static inline void inbound_primed(struct qdio_q *q, int count) +static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, + int count, bool auto_ack) { - int new; - - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count); - - /* for QEBSM the ACK was already set by EQBS */ - if (is_qebsm(q)) { - if (!q->u.in.polling) { - q->u.in.polling = 1; - q->u.in.ack_count = count; - q->u.in.ack_start = q->first_to_check; - return; - } - - /* delete the previous ACK's */ - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = count; - q->u.in.ack_start = q->first_to_check; - return; - } + /* ACK the newest SBAL: */ + if (!auto_ack) + set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK); - /* - * ACK the newest buffer. The ACK will be removed in qdio_stop_polling - * or by the next inbound run. - */ - new = add_buf(q->first_to_check, count - 1); - if (q->u.in.polling) { - /* reset the previous ACK but first set the new one */ - set_buf_state(q, new, SLSB_P_INPUT_ACK); - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); - } else { - q->u.in.polling = 1; - set_buf_state(q, new, SLSB_P_INPUT_ACK); - } - - q->u.in.ack_start = new; - count--; - if (!count) - return; - /* need to change ALL buffers to get more interrupts */ - set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); + if (!q->u.in.batch_count) + q->u.in.batch_start = start; + q->u.in.batch_count += count; } -static int get_inbound_buffer_frontier(struct qdio_q *q) +static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start, + unsigned int *error) { - int count, stop; unsigned char state = 0; + int count; - q->timestamp = get_tod_clock(); + q->timestamp = get_tod_clock_fast(); - /* - * Don't check 128 buffers, as otherwise qdio_inbound_q_moved - * would return 0. - */ - count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); - stop = add_buf(q->first_to_check, count); + count = atomic_read(&q->nr_buf_used); + if (!count) + return 0; - if (q->first_to_check == stop) - goto out; + if (qdio_need_siga_sync(q->irq_ptr)) + qdio_sync_input_queue(q); - /* - * No siga sync here, as a PCI or we after a thin interrupt - * already sync'ed the queues. 
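The set_buf_states() hunk further up replaces one xchg() per buffer with plain WRITE_ONCE() stores bracketed by two mb() fences: SBAL contents must be visible before the ownership change, and the ownership change before any subsequent SIGA. The contract in miniature; a sketch with hypothetical fill/consume helpers, not qdio code:

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct ring {
		void		*sbal[128];	/* buffer descriptors (simplified) */
		unsigned char	 slsb[128];	/* per-buffer ownership state */
	};

	extern void fill_data(void *sbal);	/* hypothetical */
	extern void signal_peer(struct ring *r);/* hypothetical, e.g. SIGA */
	extern void consume_data(void *sbal);	/* hypothetical */

	/* Producer: fill the buffer, then hand it over. */
	static void give_buffer(struct ring *r, unsigned int i, unsigned char new_state)
	{
		fill_data(r->sbal[i]);
		mb();				/* data visible before state change */
		WRITE_ONCE(r->slsb[i], new_state);
		mb();				/* state visible before signalling */
		signal_peer(r);
	}

	/* Consumer: check ownership first, only then touch the data. */
	static bool take_buffer(struct ring *r, unsigned int i, unsigned char owned)
	{
		if (READ_ONCE(r->slsb[i]) != owned)
			return false;
		rmb();				/* read state before data */
		consume_data(r->sbal[i]);
		return true;
	}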
- */ - count = get_buf_states(q, q->first_to_check, &state, count, 1, 0); + count = get_buf_states(q, start, &state, count, 1); if (!count) - goto out; + return 0; switch (state) { case SLSB_P_INPUT_PRIMED: - inbound_primed(q, count); - q->first_to_check = add_buf(q->first_to_check, count); - if (atomic_sub(count, &q->nr_buf_used) == 0) + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, + count); + + inbound_handle_work(q, start, count, is_qebsm(q)); + if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); - break; + return count; case SLSB_P_INPUT_ERROR: - process_buffer_error(q, count); - q->first_to_check = add_buf(q->first_to_check, count); - atomic_sub(count, &q->nr_buf_used); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr, + count); + + *error = QDIO_ERROR_SLSB_STATE; + process_buffer_error(q, start, count); + inbound_handle_work(q, start, count, false); + if (atomic_sub_return(count, &q->nr_buf_used) == 0) + qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); - break; + return count; case SLSB_CU_INPUT_EMPTY: - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_ACK: if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); - break; + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", + q->nr, start); + return 0; + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_ACK: + /* We should never see this state, throw a WARN: */ default: - WARN_ON_ONCE(1); + dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, + "found state %#x at index %u on queue %u\n", + state, start, q->nr); + return 0; } -out: - return q->first_to_check; } -static int qdio_inbound_q_moved(struct qdio_q *q) +int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error) { - int bufnr; + struct qdio_irq *irq = cdev->private->qdio_data; + unsigned int start; + struct qdio_q *q; + int count; + + if (!irq) + return -ENODEV; - bufnr = get_inbound_buffer_frontier(q); + q = irq->input_qs[nr]; + start = q->first_to_check; + *error = 0; - if (bufnr != q->last_move) { - q->last_move = bufnr; - if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) - q->u.in.timestamp = get_tod_clock(); - return 1; - } else + count = get_inbound_buffer_frontier(q, start, error); + if (count == 0) return 0; + + *bufnr = start; + q->first_to_check = add_buf(start, count); + return count; } +EXPORT_SYMBOL_GPL(qdio_inspect_input_queue); -static inline int qdio_inbound_q_done(struct qdio_q *q) +static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) { unsigned char state = 0; if (!atomic_read(&q->nr_buf_used)) return 1; - if (need_siga_sync(q)) - qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state, 0); + if (qdio_need_siga_sync(q->irq_ptr)) + qdio_sync_input_queue(q); + get_buf_state(q, start, &state, 0); if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) /* more work coming */ return 0; - if (is_thinint_irq(q->irq_ptr)) - return 1; - - /* don't poll under z/VM */ - if (MACHINE_IS_VM) - return 1; - - /* - * At this point we know, that inbound first_to_check - * has (probably) not moved (see qdio_inbound_processing). 
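qdio_inspect_input_queue() is the replacement for qdio_get_next_buffers(), which is removed near the end of this patch: the upper-layer driver now pulls completed buffers explicitly, and first_to_check advances only through this call. A sketch of a caller, where my_process_input() is a hypothetical driver helper:

	static int my_poll_input(struct ccw_device *cdev)
	{
		unsigned int bufnr, error;
		int count;

		count = qdio_inspect_input_queue(cdev, 0, &bufnr, &error);
		if (count > 0)
			my_process_input(bufnr, count, error);	/* hypothetical */
		return count;	/* 0: nothing new, <0: -ENODEV */
	}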
- */ - if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", - q->first_to_check); - return 1; - } else - return 0; + return 1; } -static inline int contains_aobs(struct qdio_q *q) -{ - return !q->is_input_q && q->u.out.use_cq; -} - -static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) +static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start, + unsigned int *error) { unsigned char state = 0; - int j, b = start; - - if (!contains_aobs(q)) - return; - - for (j = 0; j < count; ++j) { - get_buf_state(q, b, &state, 0); - if (state == SLSB_P_OUTPUT_PENDING) { - struct qaob *aob = q->u.out.aobs[b]; - if (aob == NULL) - continue; - - q->u.out.sbal_state[b].flags |= - QDIO_OUTBUF_STATE_FLAG_PENDING; - q->u.out.aobs[b] = NULL; - } else if (state == SLSB_P_OUTPUT_EMPTY) { - q->u.out.sbal_state[b].aob = NULL; - } - b = next_buf(b); - } -} - -static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, - int bufnr) -{ - unsigned long phys_aob = 0; - - if (!q->use_cq) - goto out; - - if (!q->aobs[bufnr]) { - struct qaob *aob = qdio_allocate_aob(); - q->aobs[bufnr] = aob; - } - if (q->aobs[bufnr]) { - q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; - q->sbal_state[bufnr].aob = q->aobs[bufnr]; - q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; - phys_aob = virt_to_phys(q->aobs[bufnr]); - WARN_ON_ONCE(phys_aob & 0xFF); - } - -out: - return phys_aob; -} - -static void qdio_kick_handler(struct qdio_q *q) -{ - int start = q->first_to_kick; - int end = q->first_to_check; int count; - if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) - return; - - count = sub_buf(end, start); - - if (q->is_input_q) { - qperf_inc(q, inbound_handler); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); - } else { - qperf_inc(q, outbound_handler); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", - start, count); - } - - qdio_handle_aobs(q, start, count); - - q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, - q->irq_ptr->int_parm); - - /* for the next time */ - q->first_to_kick = end; - q->qdio_error = 0; -} - -static void __qdio_inbound_processing(struct qdio_q *q) -{ - qperf_inc(q, tasklet_inbound); - - if (!qdio_inbound_q_moved(q)) - return; - - qdio_kick_handler(q); - - if (!qdio_inbound_q_done(q)) { - /* means poll time is not yet over */ - qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); - return; - } - } + q->timestamp = get_tod_clock_fast(); - qdio_stop_polling(q); - /* - * We need to check again to not lose initiative after - * resetting the ACK state. - */ - if (!qdio_inbound_q_done(q)) { - qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); - } -} - -void qdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __qdio_inbound_processing(q); -} - -static int get_outbound_buffer_frontier(struct qdio_q *q) -{ - int count, stop; - unsigned char state = 0; - - q->timestamp = get_tod_clock(); - - if (need_siga_sync(q)) - if (((queue_type(q) != QDIO_IQDIO_QFMT) && - !pci_out_supported(q)) || - (queue_type(q) == QDIO_IQDIO_QFMT && - multicast_outbound(q))) - qdio_siga_sync_q(q); + count = atomic_read(&q->nr_buf_used); + if (!count) + return 0; - /* - * Don't check 128 buffers, as otherwise qdio_inbound_q_moved - * would return 0. 
- */ - count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); - stop = add_buf(q->first_to_check, count); - if (q->first_to_check == stop) - goto out; + if (qdio_need_siga_sync(q->irq_ptr)) + qdio_sync_output_queue(q); - count = get_buf_states(q, q->first_to_check, &state, count, 0, 1); + count = get_buf_states(q, start, &state, count, 0); if (!count) - goto out; + return 0; switch (state) { + case SLSB_P_OUTPUT_PENDING: + *error = QDIO_ERROR_SLSB_PENDING; + fallthrough; case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); atomic_sub(count, &q->nr_buf_used); - q->first_to_check = add_buf(q->first_to_check, count); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); - - break; + return count; case SLSB_P_OUTPUT_ERROR: - process_buffer_error(q, count); - q->first_to_check = add_buf(q->first_to_check, count); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x", + q->nr, count); + + *error = QDIO_ERROR_SLSB_STATE; + process_buffer_error(q, start, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); - break; + return count; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); - break; - case SLSB_P_OUTPUT_NOT_INIT: + return 0; case SLSB_P_OUTPUT_HALTED: - break; + return 0; + case SLSB_P_OUTPUT_NOT_INIT: + /* We should never see this state, throw a WARN: */ default: - WARN_ON_ONCE(1); + dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, + "found state %#x at index %u on queue %u\n", + state, start, q->nr); + return 0; } - -out: - return q->first_to_check; } -/* all buffers processed? 
*/ -static inline int qdio_outbound_q_done(struct qdio_q *q) +int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error) { - return atomic_read(&q->nr_buf_used) == 0; -} + struct qdio_irq *irq = cdev->private->qdio_data; + unsigned int start; + struct qdio_q *q; + int count; -static inline int qdio_outbound_q_moved(struct qdio_q *q) -{ - int bufnr; + if (!irq) + return -ENODEV; - bufnr = get_outbound_buffer_frontier(q); + q = irq->output_qs[nr]; + start = q->first_to_check; + *error = 0; - if (bufnr != q->last_move) { - q->last_move = bufnr; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); - return 1; - } else + count = get_outbound_buffer_frontier(q, start, error); + if (count == 0) return 0; + + *bufnr = start; + q->first_to_check = add_buf(start, count); + return count; } +EXPORT_SYMBOL_GPL(qdio_inspect_output_queue); -static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, + dma64_t aob) { int retries = 0, cc; unsigned int busy_bit; - if (!need_siga_out(q)) + if (!qdio_need_siga_out(q->irq_ptr)) return 0; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); retry: qperf_inc(q, siga_write); - cc = qdio_siga_output(q, &busy_bit, aob); + cc = qdio_siga_output(q, count, &busy_bit, aob); switch (cc) { case 0: break; @@ -849,111 +670,6 @@ retry: return cc; } -static void __qdio_outbound_processing(struct qdio_q *q) -{ - qperf_inc(q, tasklet_outbound); - WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); - - if (qdio_outbound_q_moved(q)) - qdio_kick_handler(q); - - if (queue_type(q) == QDIO_ZFCP_QFMT) - if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) - goto sched; - - if (q->u.out.pci_out_enabled) - return; - - /* - * Now we know that queue type is either qeth without pci enabled - * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY - * is noticed and outbound_handler is called after some time. - */ - if (qdio_outbound_q_done(q)) - del_timer(&q->u.out.timer); - else - if (!timer_pending(&q->u.out.timer)) - mod_timer(&q->u.out.timer, jiffies + 10 * HZ); - return; - -sched: - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); -} - -/* outbound tasklet */ -void qdio_outbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __qdio_outbound_processing(q); -} - -void qdio_outbound_timer(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); -} - -static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) -{ - struct qdio_q *out; - int i; - - if (!pci_out_supported(q)) - return; - - for_each_output_queue(q->irq_ptr, out, i) - if (!qdio_outbound_q_done(out)) - tasklet_schedule(&out->tasklet); -} - -static void __tiqdio_inbound_processing(struct qdio_q *q) -{ - qperf_inc(q, tasklet_inbound); - if (need_siga_sync(q) && need_siga_sync_after_ai(q)) - qdio_sync_queues(q); - - /* - * The interrupt could be caused by a PCI request. Check the - * PCI capable outbound queues. 
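The Output Queue gets the symmetric inspect helper, and SLSB_P_OUTPUT_PENDING is no longer folded into EMPTY: it is reported as QDIO_ERROR_SLSB_PENDING so the driver can defer completion until the matching QAOB arrives. A sketch of a caller, with hypothetical my_defer()/my_complete() helpers:

	static void my_drain_output(struct ccw_device *cdev)
	{
		unsigned int bufnr, error;
		int count;

		count = qdio_inspect_output_queue(cdev, 0, &bufnr, &error);
		if (count <= 0)
			return;
		if (error == QDIO_ERROR_SLSB_PENDING)
			my_defer(bufnr, count);		/* completion comes via QAOB */
		else
			my_complete(bufnr, count, error);
	}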
- */ - qdio_check_outbound_after_thinint(q); - - if (!qdio_inbound_q_moved(q)) - return; - - qdio_kick_handler(q); - - if (!qdio_inbound_q_done(q)) { - qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); - return; - } - } - - qdio_stop_polling(q); - /* - * We need to check again to not lose initiative after - * resetting the ACK state. - */ - if (!qdio_inbound_q_done(q)) { - qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); - } -} - -void tiqdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __tiqdio_inbound_processing(q); -} - static inline void qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) { @@ -975,63 +691,29 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) /* PCI interrupt handler */ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) { - int i; - struct qdio_q *q; - - if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - - for_each_input_queue(irq_ptr, q, i) { - if (q->u.in.queue_start_poll) { - /* skip if polling is enabled or already in work */ - if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, - &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); - continue; - } - q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, - q->irq_ptr->int_parm); - } else { - tasklet_schedule(&q->tasklet); - } - } - - if (!pci_out_supported(q)) + if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; - for_each_output_queue(irq_ptr, q, i) { - if (qdio_outbound_q_done(q)) - continue; - if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) - qdio_siga_sync_q(q); - tasklet_schedule(&q->tasklet); - } + qdio_deliver_irq(irq_ptr); + irq_ptr->last_data_irq_time = get_lowcore()->int_clock; } -static void qdio_handle_activate_check(struct ccw_device *cdev, - unsigned long intparm, int cstat, int dstat) +static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, + unsigned long intparm, int cstat, + int dstat) { - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - struct qdio_q *q; - int count; + unsigned int first_to_check = 0; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); - if (irq_ptr->nr_input_qs) { - q = irq_ptr->input_qs[0]; - } else if (irq_ptr->nr_output_qs) { - q = irq_ptr->output_qs[0]; - } else { - dump_stack(); - goto no_handler; - } + /* zfcp wants this: */ + if (irq_ptr->nr_input_qs) + first_to_check = irq_ptr->input_qs[0]->first_to_check; - count = sub_buf(q->first_to_check, q->first_to_kick); - q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, - q->nr, q->first_to_kick, count, irq_ptr->int_parm); -no_handler: + irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0, + first_to_check, 0, irq_ptr->int_parm); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); /* * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. 
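Activate-check conditions are now reported through irq_ptr->error_handler, which the setup code wires to the driver's input_handler (hence the "Needed as error_handler" check in qdio_establish() below). A sketch of an input handler that doubles as the error handler; the recovery helpers are hypothetical:

	static void my_in_handler(struct ccw_device *cdev, unsigned int qdio_err,
				  int queue, int first, int count,
				  unsigned long user)
	{
		struct my_dev *dev = (struct my_dev *)user;	/* hypothetical */

		if (qdio_err & QDIO_ERROR_ACTIVATE) {
			/* subchannel went to STOPPED state, schedule recovery */
			my_schedule_recovery(dev);		/* hypothetical */
			return;
		}
		my_process_input(first, count, qdio_err);	/* hypothetical */
	}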
@@ -1040,26 +722,27 @@ no_handler: lgr_info_log(); } -static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, - int dstat) +static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, + int dstat, int dcc) { - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); if (cstat) goto error; if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) goto error; + if (dcc == 1) + return -EAGAIN; if (!(dstat & DEV_STAT_DEV_END)) goto error; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); - return; + return 0; error: DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + return -EIO; } /* qdio interrupt handler */ @@ -1067,10 +750,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; - int cstat, dstat; + struct subchannel_id schid; + int cstat, dstat, rc, dcc; if (!intparm || !irq_ptr) { - DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_ERROR("qint:%4x", schid.sch_no); return; } @@ -1086,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, qdio_irq_check_sense(irq_ptr, irb); cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; + dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0; + rc = 0; switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); + rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc); break; case QDIO_IRQ_STATE_CLEANUP: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); @@ -1101,14 +788,27 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } if (cstat || dstat) - qdio_handle_activate_check(cdev, intparm, cstat, + qdio_handle_activate_check(irq_ptr, intparm, cstat, dstat); + else if (dcc == 1) + rc = -EAGAIN; break; case QDIO_IRQ_STATE_STOPPED: break; default: WARN_ON_ONCE(1); } + + if (rc == -EAGAIN) { + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry"); + rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0); + if (!rc) + return; + DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + } + wake_up(&cdev->private->wait_q); } @@ -1123,28 +823,45 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data) { + struct subchannel_id schid; if (!cdev || !cdev->private) return -EINVAL; - DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); - return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("get ssqd:%4x", schid.sch_no); + return qdio_setup_get_ssqd(NULL, &schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); -static void qdio_shutdown_queues(struct ccw_device *cdev) +static int qdio_cancel_ccw(struct qdio_irq *irq, int how) { - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - struct qdio_q *q; - int i; - - for_each_input_queue(irq_ptr, q, i) - tasklet_kill(&q->tasklet); + struct ccw_device *cdev = irq->cdev; + long timeout; + int rc; - for_each_output_queue(irq_ptr, q, i) { - del_timer(&q->u.out.timer); - tasklet_kill(&q->tasklet); + spin_lock_irq(get_ccwdev_lock(cdev)); + qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP); + if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) + rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); + else + /* default 
behaviour is halt */ + rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + spin_unlock_irq(get_ccwdev_lock(cdev)); + if (rc) { + DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no); + DBF_ERROR("rc:%4d", rc); + return rc; } + + timeout = wait_event_interruptible_timeout(cdev->private->wait_q, + irq->state == QDIO_IRQ_STATE_INACTIVE || + irq->state == QDIO_IRQ_STATE_ERR, + 10 * HZ); + if (timeout <= 0) + rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; + + return rc; } /** @@ -1155,14 +872,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) int qdio_shutdown(struct ccw_device *cdev, int how) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int rc; - unsigned long flags; if (!irq_ptr) return -ENODEV; WARN_ON_ONCE(irqs_disabled()); - DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qshutdown:%4x", schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); /* @@ -1175,44 +893,15 @@ int qdio_shutdown(struct ccw_device *cdev, int how) } /* - * Indicate that the device is going down. Scheduling the queue - * tasklets is forbidden from here on. + * Indicate that the device is going down. */ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - tiqdio_remove_input_queues(irq_ptr); - qdio_shutdown_queues(cdev); qdio_shutdown_debug_entries(irq_ptr); - /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); - - if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) - rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); - else - /* default behaviour is halt */ - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - if (rc) { - DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); - DBF_ERROR("rc:%4d", rc); - goto no_cleanup; - } - - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - 10 * HZ); - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); - -no_cleanup: + rc = qdio_cancel_ccw(irq_ptr, how); qdio_shutdown_thinint(irq_ptr); - - /* restore interrupt handler */ - if ((void *)cdev->handler == (void *)qdio_int_handler) - cdev->handler = irq_ptr->orig_handler; - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + qdio_shutdown_irq(irq_ptr); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); @@ -1229,54 +918,67 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); int qdio_free(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; if (!irq_ptr) return -ENODEV; - DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qfree:%4x", schid.sch_no); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); mutex_lock(&irq_ptr->setup_mutex); - if (irq_ptr->debug_area != NULL) { - debug_unregister(irq_ptr->debug_area); - irq_ptr->debug_area = NULL; - } + irq_ptr->debug_area = NULL; cdev->private->qdio_data = NULL; mutex_unlock(&irq_ptr->setup_mutex); - qdio_release_memory(irq_ptr); + qdio_free_queues(irq_ptr); + free_page((unsigned long) irq_ptr->qdr); + free_page(irq_ptr->chsc_page); + kfree(irq_ptr->ccw); + free_page((unsigned long) irq_ptr); return 0; } EXPORT_SYMBOL_GPL(qdio_free); /** * qdio_allocate - allocate qdio queues and associated data - * @init_data: initialization data + * @cdev: associated ccw device + * @no_input_qs: allocate this number of Input Queues + * @no_output_qs: allocate this number of 
Output Queues */ -int qdio_allocate(struct qdio_initialize *init_data) +int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, + unsigned int no_output_qs) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; + int rc = -ENOMEM; - DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qallocate:%4x", schid.sch_no); - if ((init_data->no_input_qs && !init_data->input_handler) || - (init_data->no_output_qs && !init_data->output_handler)) + if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ || + no_output_qs > QDIO_MAX_QUEUES_PER_IRQ) return -EINVAL; - if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || - (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) - return -EINVAL; + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL); + if (!irq_ptr) + return -ENOMEM; - if ((!init_data->input_sbal_addr_array) || - (!init_data->output_sbal_addr_array)) - return -EINVAL; + irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA); + if (!irq_ptr->ccw) + goto err_ccw; - /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ - irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!irq_ptr) - goto out_err; + /* kmemleak doesn't scan the page-allocated irq_ptr: */ + kmemleak_not_leak(irq_ptr->ccw); + irq_ptr->cdev = cdev; mutex_init(&irq_ptr->setup_mutex); - qdio_allocate_dbf(init_data, irq_ptr); + if (qdio_allocate_dbf(irq_ptr)) + goto err_dbf; + + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs, + no_output_qs); /* * Allocate a page for the chsc calls in qdio_establish. @@ -1286,120 +988,152 @@ int qdio_allocate(struct qdio_initialize *init_data) */ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); if (!irq_ptr->chsc_page) - goto out_rel; + goto err_chsc; /* qdr is used in ccw1.cda which is u32 */ irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr->qdr) - goto out_rel; + goto err_qdr; - if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, - init_data->no_output_qs)) - goto out_rel; + rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs); + if (rc) + goto err_queues; - init_data->cdev->private->qdio_data = irq_ptr; + cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; -out_rel: - qdio_release_memory(irq_ptr); -out_err: - return -ENOMEM; + +err_queues: + free_page((unsigned long) irq_ptr->qdr); +err_qdr: + free_page(irq_ptr->chsc_page); +err_chsc: +err_dbf: + kfree(irq_ptr->ccw); +err_ccw: + free_page((unsigned long) irq_ptr); + return rc; } EXPORT_SYMBOL_GPL(qdio_allocate); -static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) +static void qdio_trace_init_data(struct qdio_irq *irq, + struct qdio_initialize *data) { - struct qdio_q *q = irq_ptr->input_qs[0]; - int i, use_cq = 0; - - if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) - use_cq = 1; - - for_each_output_queue(irq_ptr, q, i) { - if (use_cq) { - if (qdio_enable_async_operation(&q->u.out) < 0) { - use_cq = 0; - continue; - } - } else - qdio_disable_async_operation(&q->u.out); - } - DBF_EVENT("use_cq:%d", use_cq); + DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format); + DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format); + DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR); + DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs, + data->no_output_qs); + DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR); + DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR); 
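qdio_allocate() now takes the ccw device and the queue counts directly, and qdio_establish(), reworked just below, takes the device plus the init data; handler and SBAL-array validation move into establish, which also unwinds itself on failure instead of requiring a qdio_shutdown() call. A probe-time sketch, where the device struct, handlers and SBAL arrays are hypothetical driver objects:

	static int my_setup_qdio(struct ccw_device *cdev, struct my_dev *dev)
	{
		struct qdio_initialize init = {
			.q_format		= QDIO_QETH_QFMT,
			.no_input_qs		= 1,
			.no_output_qs		= 1,
			.input_handler		= my_in_handler,   /* doubles as error_handler */
			.output_handler		= my_out_handler,  /* hypothetical */
			.irq_poll		= my_irq_poll,	   /* now mandatory */
			.int_parm		= (unsigned long)dev,
			.input_sbal_addr_array	= dev->in_sbals,   /* hypothetical */
			.output_sbal_addr_array	= dev->out_sbals,  /* hypothetical */
		};
		int rc;

		rc = qdio_allocate(cdev, 1, 1);		/* queue counts move here */
		if (rc)
			return rc;

		rc = qdio_establish(cdev, &init);
		if (rc)
			qdio_free(cdev);	/* establish cleaned up after itself */
		return rc;
	}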
+ DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR); + DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR); + DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *), + DBF_ERR); } /** * qdio_establish - establish queues on a qdio subchannel + * @cdev: associated ccw device * @init_data: initialization data */ -int qdio_establish(struct qdio_initialize *init_data) +int qdio_establish(struct ccw_device *cdev, + struct qdio_initialize *init_data) { - struct qdio_irq *irq_ptr; - struct ccw_device *cdev = init_data->cdev; - unsigned long saveflags; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; + struct ciw *ciw; + long timeout; int rc; - DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qestablish:%4x", schid.sch_no); - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) + if (init_data->no_input_qs > irq_ptr->max_input_qs || + init_data->no_output_qs > irq_ptr->max_output_qs) + return -EINVAL; + + /* Needed as error_handler: */ + if (!init_data->input_handler) + return -EINVAL; + + if (init_data->no_output_qs && !init_data->output_handler) + return -EINVAL; + + if (!init_data->input_sbal_addr_array || + !init_data->output_sbal_addr_array) + return -EINVAL; + + if (!init_data->irq_poll) return -EINVAL; + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO EQ", schid.sch_no); + return -EIO; + } + mutex_lock(&irq_ptr->setup_mutex); - qdio_setup_irq(init_data); + qdio_trace_init_data(irq_ptr, init_data); + qdio_setup_irq(irq_ptr, init_data); rc = qdio_establish_thinint(irq_ptr); - if (rc) { - mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return rc; - } + if (rc) + goto err_thinint; /* establish q */ - irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; - irq_ptr->ccw.flags = CCW_FLAG_SLI; - irq_ptr->ccw.count = irq_ptr->equeue.count; - irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); + irq_ptr->ccw->cmd_code = ciw->cmd; + irq_ptr->ccw->flags = CCW_FLAG_SLI; + irq_ptr->ccw->count = ciw->count; + irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr); - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); - rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); + goto err_ccw_start; } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - if (rc) { - mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return rc; + timeout = wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || + irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); + if (timeout <= 0) { + rc = (timeout == -ERESTARTSYS) ? 
-EINTR : -ETIME; + goto err_ccw_timeout; } - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || - irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); - if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { - mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; + rc = -EIO; + goto err_ccw_error; } qdio_setup_ssqd_info(irq_ptr); - qdio_detect_hsicq(irq_ptr); - /* qebsm is now setup if available, initialize buffer states */ qdio_init_buf_states(irq_ptr); mutex_unlock(&irq_ptr->setup_mutex); - qdio_print_subchannel_info(irq_ptr, cdev); - qdio_setup_debug_entries(irq_ptr, cdev); + qdio_print_subchannel_info(irq_ptr); + qdio_setup_debug_entries(irq_ptr); return 0; + +err_ccw_timeout: + qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR); +err_ccw_error: +err_ccw_start: + qdio_shutdown_thinint(irq_ptr); +err_thinint: + qdio_shutdown_irq(irq_ptr); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + mutex_unlock(&irq_ptr->setup_mutex); + return rc; } EXPORT_SYMBOL_GPL(qdio_establish); @@ -1409,18 +1143,22 @@ EXPORT_SYMBOL_GPL(qdio_establish); */ int qdio_activate(struct ccw_device *cdev) { - struct qdio_irq *irq_ptr; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; + struct ciw *ciw; int rc; - unsigned long saveflags; - DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qactivate:%4x", schid.sch_no); - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO AQ", schid.sch_no); + return -EIO; + } mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { @@ -1428,27 +1166,22 @@ int qdio_activate(struct ccw_device *cdev) goto out; } - irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags = CCW_FLAG_SLI; - irq_ptr->ccw.count = irq_ptr->aqueue.count; - irq_ptr->ccw.cda = 0; + irq_ptr->ccw->cmd_code = ciw->cmd; + irq_ptr->ccw->flags = CCW_FLAG_SLI; + irq_ptr->ccw->count = ciw->count; + irq_ptr->ccw->cda = 0; - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - - if (rc) goto out; - - if (is_thinint_irq(irq_ptr)) - tiqdio_add_input_queues(irq_ptr); + } /* wait for subchannel to become active */ msleep(5); @@ -1468,85 +1201,73 @@ out: } EXPORT_SYMBOL_GPL(qdio_activate); -static inline int buf_in_between(int bufnr, int start, int count) -{ - int end = add_buf(start, count); - - if (end > start) { - if (bufnr >= start && bufnr < end) - return 1; - else - return 0; - } - - /* wrap-around case */ - if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || - (bufnr < end)) - return 1; - else - return 0; -} - /** * handle_inbound - reset processed input buffers * @q: queue containing the buffers - * @callflags: flags * @bufnr: first buffer to process * @count: how many buffers are emptied */ -static int handle_inbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) +static 
int handle_inbound(struct qdio_q *q, int bufnr, int count) { - int used, diff; + int overlap; qperf_inc(q, inbound_call); - if (!q->u.in.polling) - goto set; - - /* protect against stop polling setting an ACK for an emptied slsb */ - if (count == QDIO_MAX_BUFFERS_PER_Q) { - /* overwriting everything, just delete polling status */ - q->u.in.polling = 0; - q->u.in.ack_count = 0; - goto set; - } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { - if (is_qebsm(q)) { - /* partial overwrite, just update ack_start */ - diff = add_buf(bufnr, count); - diff = sub_buf(diff, q->u.in.ack_start); - q->u.in.ack_count -= diff; - if (q->u.in.ack_count <= 0) { - q->u.in.polling = 0; - q->u.in.ack_count = 0; - goto set; - } - q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); - } - else - /* the only ACK will be deleted, so stop polling */ - q->u.in.polling = 0; + /* If any processed SBALs are returned to HW, adjust our tracking: */ + overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr), + q->u.in.batch_count); + if (overlap > 0) { + q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap); + q->u.in.batch_count -= overlap; } -set: count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); - used = atomic_add_return(count, &q->nr_buf_used) - count; + atomic_add(count, &q->nr_buf_used); - if (need_siga_in(q)) + if (qdio_need_siga_in(q->irq_ptr)) return qdio_siga_input(q); return 0; } /** + * qdio_add_bufs_to_input_queue - process buffers on an Input Queue + * @cdev: associated ccw_device for the qdio subchannel + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr, + unsigned int bufnr, unsigned int count) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) + return -EINVAL; + + if (!irq_ptr) + return -ENODEV; + + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count); + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EIO; + if (!count) + return 0; + + return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count); +} +EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue); + +/** * handle_outbound - process filled outbound buffers * @q: queue containing the buffers - * @callflags: flags * @bufnr: first buffer to process * @count: how many buffers are filled + * @aob: asynchronous operation block */ -static int handle_outbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) +static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count, + struct qaob *aob) { unsigned char state = 0; int used, rc = 0; @@ -1559,100 +1280,77 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, if (used == QDIO_MAX_BUFFERS_PER_Q) qperf_inc(q, outbound_queue_full); - if (callflags & QDIO_FLAG_PCI_OUT) { - q->u.out.pci_out_enabled = 1; - qperf_inc(q, pci_request_int); - } else - q->u.out.pci_out_enabled = 0; - if (queue_type(q) == QDIO_IQDIO_QFMT) { - unsigned long phys_aob = 0; - - /* One SIGA-W per buffer required for unicast HSI */ - WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); - - phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); - - rc = qdio_kick_outbound_q(q, phys_aob); - } else if (need_siga_sync(q)) { - rc = qdio_siga_sync_q(q); + dma64_t phys_aob = aob ? 
virt_to_dma64(aob) : 0; + + WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256)); + rc = qdio_kick_outbound_q(q, count, phys_aob); + } else if (qdio_need_siga_sync(q->irq_ptr)) { + rc = qdio_sync_output_queue(q); + } else if (count < QDIO_MAX_BUFFERS_PER_Q && + get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && + state == SLSB_CU_OUTPUT_PRIMED) { + /* The previous buffer is not processed yet, tack on. */ + qperf_inc(q, fast_requeue); } else { - /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state, 0); - if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q, 0); - else - qperf_inc(q, fast_requeue); + rc = qdio_kick_outbound_q(q, count, 0); } - /* in case of SIGA errors we must process the error immediately */ - if (used >= q->u.out.scan_threshold || rc) - tasklet_schedule(&q->tasklet); - else - /* free the SBALs in case of no further traffic */ - if (!timer_pending(&q->u.out.timer)) - mod_timer(&q->u.out.timer, jiffies + HZ); return rc; } /** - * do_QDIO - process input or output buffers + * qdio_add_bufs_to_output_queue - process buffers on an Output Queue * @cdev: associated ccw_device for the qdio subchannel - * @callflags: input or output and special flags from the program * @q_nr: queue number * @bufnr: buffer number * @count: how many buffers to process + * @aob: asynchronous operation block */ -int do_QDIO(struct ccw_device *cdev, unsigned int callflags, - int q_nr, unsigned int bufnr, unsigned int count) +int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr, + unsigned int bufnr, unsigned int count, + struct qaob *aob) { - struct qdio_irq *irq_ptr; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL; - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - DBF_DEV_EVENT(DBF_INFO, irq_ptr, - "do%02x b:%02x c:%02x", callflags, bufnr, count); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count); if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) return -EIO; if (!count) return 0; - if (callflags & QDIO_FLAG_SYNC_INPUT) - return handle_inbound(irq_ptr->input_qs[q_nr], - callflags, bufnr, count); - else if (callflags & QDIO_FLAG_SYNC_OUTPUT) - return handle_outbound(irq_ptr->output_qs[q_nr], - callflags, bufnr, count); - return -EINVAL; + + return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob); } -EXPORT_SYMBOL_GPL(do_QDIO); +EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue); /** - * qdio_start_irq - process input buffers + * qdio_start_irq - enable interrupt processing for the device * @cdev: associated ccw_device for the qdio subchannel - * @nr: input queue number * * Return codes * 0 - success * 1 - irqs not started since new data is available */ -int qdio_start_irq(struct ccw_device *cdev, int nr) +int qdio_start_irq(struct ccw_device *cdev) { struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; + unsigned int i; if (!irq_ptr) return -ENODEV; - q = irq_ptr->input_qs[nr]; - clear_nonshared_ind(irq_ptr); - qdio_stop_polling(q); - clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); + for_each_input_queue(irq_ptr, q, i) + qdio_stop_polling(q); + + clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state); /* * We need to check again to not lose initiative after @@ -1660,13 +1358,16 @@ int qdio_start_irq(struct ccw_device *cdev, int nr) */ if (test_nonshared_ind(irq_ptr)) goto rescan; - if (!qdio_inbound_q_done(q)) - goto rescan; + + for_each_input_queue(irq_ptr, q, i) { + if 
(!qdio_inbound_q_done(q, q->first_to_check)) + goto rescan; + } + return 0; rescan: - if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, - &q->u.in.queue_irq_state)) + if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state)) return 0; else return 1; @@ -1675,77 +1376,21 @@ rescan: EXPORT_SYMBOL(qdio_start_irq); /** - * qdio_get_next_buffers - process input buffers - * @cdev: associated ccw_device for the qdio subchannel - * @nr: input queue number - * @bufnr: first filled buffer number - * @error: buffers are in error state - * - * Return codes - * < 0 - error - * = 0 - no new buffers found - * > 0 - number of processed buffers - */ -int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, - int *error) -{ - struct qdio_q *q; - int start, end; - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - - if (!irq_ptr) - return -ENODEV; - q = irq_ptr->input_qs[nr]; - - /* - * Cannot rely on automatic sync after interrupt since queues may - * also be examined without interrupt. - */ - if (need_siga_sync(q)) - qdio_sync_queues(q); - - /* check the PCI capable outbound queues. */ - qdio_check_outbound_after_thinint(q); - - if (!qdio_inbound_q_moved(q)) - return 0; - - /* Note: upper-layer MUST stop processing immediately here ... */ - if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) - return -EIO; - - start = q->first_to_kick; - end = q->first_to_check; - *bufnr = start; - *error = q->qdio_error; - - /* for the next time */ - q->first_to_kick = end; - q->qdio_error = 0; - return sub_buf(end, start); -} -EXPORT_SYMBOL(qdio_get_next_buffers); - -/** * qdio_stop_irq - disable interrupt processing for the device * @cdev: associated ccw_device for the qdio subchannel - * @nr: input queue number * * Return codes * 0 - interrupts were already disabled * 1 - interrupts successfully disabled */ -int qdio_stop_irq(struct ccw_device *cdev, int nr) +int qdio_stop_irq(struct ccw_device *cdev) { - struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - q = irq_ptr->input_qs[nr]; - if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, - &q->u.in.queue_irq_state)) + if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state)) return 0; else return 1; @@ -1762,16 +1407,11 @@ static int __init init_QDIO(void) rc = qdio_setup_init(); if (rc) goto out_debug; - rc = tiqdio_allocate_memory(); + rc = qdio_thinint_init(); if (rc) goto out_cache; - rc = tiqdio_register_thinints(); - if (rc) - goto out_ti; return 0; -out_ti: - tiqdio_free_memory(); out_cache: qdio_setup_exit(); out_debug: @@ -1781,8 +1421,7 @@ out_debug: static void __exit exit_QDIO(void) { - tiqdio_unregister_thinints(); - tiqdio_free_memory(); + qdio_thinint_exit(); qdio_setup_exit(); qdio_debug_exit(); } |
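With the tasklets and timers gone, all processing is driven from the driver's irq_poll callback: assuming the delivery path (not in this file) sets QDIO_IRQ_DISABLED before invoking irq_poll, as the start/stop helpers above imply, the driver drains the queues, re-arms with qdio_start_irq(), and rescans if that reports fresh work. The cycle, sketched with the hypothetical helpers from the earlier examples:

	static void my_poll(struct my_dev *dev)		/* runs after irq_poll fires */
	{
		unsigned int bufnr, error;
		int count;

		do {
			count = qdio_inspect_input_queue(dev->cdev, 0, &bufnr, &error);
			if (count > 0)
				my_process_input(bufnr, count, error);
			/*
			 * Once drained, re-enable interrupts; a return value
			 * of 1 means new data raced in, so scan once more.
			 */
		} while (count > 0 || qdio_start_irq(dev->cdev) == 1);
	}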
