author     Jan Glauber <jang@linux.vnet.ibm.com>        2008-07-17 17:16:48 +0200
committer  Heiko Carstens <heiko.carstens@de.ibm.com>   2008-07-17 17:22:10 +0200
commit     779e6e1c724d30e0fd1baca78b852e41e3a23c1d (patch)
tree       b7fc4f0f01b66c3c65226cc627edd501e00ab44f /drivers
parent     dae39843f478d181da5b5e1c2c703dfcaaf838c1 (diff)
[S390] qdio: new qdio driver.
List of major changes:

- split the qdio driver into several files
- separation of the thin-interrupt code
- improved handling for multiple thin-interrupt devices
- inbound and outbound processing now always runs in tasklet context
- significantly fewer tasklet schedules needed per interrupt
- merged QEBSM with non-QEBSM handling
- cleaned up the qdio interface and added kerneldoc
- coding style fixes

Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Utz Bacher <utz.bacher@de.ibm.com>
Reviewed-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/cio/Makefile          |    2
-rw-r--r--  drivers/s390/cio/qdio.c            | 3929
-rw-r--r--  drivers/s390/cio/qdio.h            |  835
-rw-r--r--  drivers/s390/cio/qdio_debug.c      |  240
-rw-r--r--  drivers/s390/cio/qdio_debug.h      |   91
-rw-r--r--  drivers/s390/cio/qdio_main.c       | 1755
-rw-r--r--  drivers/s390/cio/qdio_perf.c       |  151
-rw-r--r--  drivers/s390/cio/qdio_perf.h       |   54
-rw-r--r--  drivers/s390/cio/qdio_setup.c      |  521
-rw-r--r--  drivers/s390/cio/qdio_thinint.c    |  380
-rw-r--r--  drivers/s390/net/qeth_core.h       |   12
-rw-r--r--  drivers/s390/net/qeth_core_main.c  |   87
-rw-r--r--  drivers/s390/net/qeth_l2_main.c    |   26
-rw-r--r--  drivers/s390/net/qeth_l3_main.c    |   25
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c       |   12
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h       |    2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h       |    5
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c      |   42
18 files changed, 3561 insertions(+), 4608 deletions(-)
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 91e9e3f3073a..bd79bd165396 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
 obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
 obj-$(CONFIG_QDIO) += qdio.o
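
The hunk above uses kbuild's composite-object convention: the per-topic objects listed in qdio-objs are linked into a single qdio.o, which is built in or compiled as a module according to CONFIG_QDIO. A minimal sketch of the same pattern for a hypothetical driver named foo (names are illustrative, not part of this patch):

    # Objects that are linked together into the composite foo.o
    foo-objs := foo_main.o foo_debug.o foo_setup.o
    # Build foo.o (built-in or module) when CONFIG_FOO is y or m
    obj-$(CONFIG_FOO) += foo.o
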
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
deleted file mode 100644
index 2bf36e14b102..000000000000
--- a/drivers/s390/cio/qdio.c
+++ /dev/null
@@ -1,3929 +0,0 @@
-/*
- *
- * linux/drivers/s390/cio/qdio.c
- *
- * Linux for S/390 QDIO base support, Hipersocket base support
- * version 2
- *
- * Copyright 2000,2002 IBM Corporation
- * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
- * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * Restriction: only 63 iqdio subchannels would have its own indicator,
- * after that, subsequent subchannels share one indicator
- *
- *
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/timer.h>
-#include <linux/mempool.h>
-#include <linux/semaphore.h>
-
-#include <asm/ccwdev.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/timex.h>
-
-#include <asm/debug.h>
-#include <asm/s390_rdev.h>
-#include <asm/qdio.h>
-#include <asm/airq.h>
-
-#include "cio.h"
-#include "css.h"
-#include "device.h"
-#include "qdio.h"
-#include "ioasm.h"
-#include "chsc.h"
-
-/****************** MODULE PARAMETER VARIABLES ********************/
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
-MODULE_DESCRIPTION("QDIO base support version 2, " \
- "Copyright 2000 IBM Corporation");
-MODULE_LICENSE("GPL");
-
-/******************** HERE WE GO ***********************************/
-
-static const char version[] = "QDIO base support version 2";
-
-static int qdio_performance_stats = 0;
-static int proc_perf_file_registration;
-static struct qdio_perf_stats perf_stats;
-
-static int hydra_thinints;
-static int is_passthrough = 0;
-static int omit_svs;
-
-static int indicator_used[INDICATORS_PER_CACHELINE];
-static __u32 * volatile indicators;
-static __u32 volatile spare_indicator;
-static atomic_t spare_indicator_usecount;
-#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
-static mempool_t *qdio_mempool_scssc;
-static struct kmem_cache *qdio_q_cache;
-
-static debug_info_t *qdio_dbf_setup;
-static debug_info_t *qdio_dbf_sbal;
-static debug_info_t *qdio_dbf_trace;
-static debug_info_t *qdio_dbf_sense;
-#ifdef CONFIG_QDIO_DEBUG
-static debug_info_t *qdio_dbf_slsb_out;
-static debug_info_t *qdio_dbf_slsb_in;
-#endif /* CONFIG_QDIO_DEBUG */
-
-/* iQDIO stuff: */
-static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
- during a while loop */
-static DEFINE_SPINLOCK(ttiq_list_lock);
-static void *tiqdio_ind;
-static void tiqdio_tl(unsigned long);
-static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
-
-/* not a macro, as one of the arguments is atomic_read */
-static inline int
-qdio_min(int a,int b)
-{
- if (a<b)
- return a;
- else
- return b;
-}
-
-/***************** SCRUBBER HELPER ROUTINES **********************/
-#ifdef CONFIG_64BIT
-static inline void qdio_perf_stat_inc(atomic64_t *count)
-{
- if (qdio_performance_stats)
- atomic64_inc(count);
-}
-
-static inline void qdio_perf_stat_dec(atomic64_t *count)
-{
- if (qdio_performance_stats)
- atomic64_dec(count);
-}
-#else /* CONFIG_64BIT */
-static inline void qdio_perf_stat_inc(atomic_t *count)
-{
- if (qdio_performance_stats)
- atomic_inc(count);
-}
-
-static inline void qdio_perf_stat_dec(atomic_t *count)
-{
- if (qdio_performance_stats)
- atomic_dec(count);
-}
-#endif /* CONFIG_64BIT */
-
-static inline __u64
-qdio_get_micros(void)
-{
- return (get_clock() >> 12); /* time>>12 is microseconds */
-}
-
-/*
- * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
- * the q in any case, so that we'll not be interrupted when we are in
- * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
- * ever works (last famous words)
- */
-static inline int
-qdio_reserve_q(struct qdio_q *q)
-{
- return atomic_add_return(1,&q->use_count) - 1;
-}
-
-static inline void
-qdio_release_q(struct qdio_q *q)
-{
- atomic_dec(&q->use_count);
-}
-
-/*check ccq */
-static int
-qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
-{
- char dbf_text[15];
-
- if (ccq == 0 || ccq == 32)
- return 0;
- if (ccq == 96 || ccq == 97)
- return 1;
- /*notify devices immediately*/
- sprintf(dbf_text,"%d", ccq);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- return -EIO;
-}
-/* EQBS: extract buffer states */
-static int
-qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
- unsigned int *start, unsigned int *cnt)
-{
- struct qdio_irq *irq;
- unsigned int tmp_cnt, q_no, ccq;
- int rc ;
- char dbf_text[15];
-
- ccq = 0;
- tmp_cnt = *cnt;
- irq = (struct qdio_irq*)q->irq_ptr;
- q_no = q->q_no;
- if(!q->is_input_q)
- q_no += irq->no_input_qs;
-again:
- ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
- rc = qdio_check_ccq(q, ccq);
- if ((ccq == 96) && (tmp_cnt != *cnt))
- rc = 0;
- if (rc == 1) {
- QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
- goto again;
- }
- if (rc < 0) {
- QDIO_DBF_TEXT2(1,trace,"eqberr");
- sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
- QDIO_STATUS_LOOK_FOR_ERROR,
- 0, 0, 0, -1, -1, q->int_parm);
- return 0;
- }
- return (tmp_cnt - *cnt);
-}
-
-/* SQBS: set buffer states */
-static int
-qdio_do_sqbs(struct qdio_q *q, unsigned char state,
- unsigned int *start, unsigned int *cnt)
-{
- struct qdio_irq *irq;
- unsigned int tmp_cnt, q_no, ccq;
- int rc;
- char dbf_text[15];
-
- ccq = 0;
- tmp_cnt = *cnt;
- irq = (struct qdio_irq*)q->irq_ptr;
- q_no = q->q_no;
- if(!q->is_input_q)
- q_no += irq->no_input_qs;
-again:
- ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
- rc = qdio_check_ccq(q, ccq);
- if (rc == 1) {
- QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
- goto again;
- }
- if (rc < 0) {
- QDIO_DBF_TEXT3(1,trace,"sqberr");
- sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
- sprintf(dbf_text,"%d,%d",ccq,q_no);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
- q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
- QDIO_STATUS_LOOK_FOR_ERROR,
- 0, 0, 0, -1, -1, q->int_parm);
- return 0;
- }
- return (tmp_cnt - *cnt);
-}
-
-static inline int
-qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
- unsigned char state, unsigned int *count)
-{
- volatile char *slsb;
- struct qdio_irq *irq;
-
- irq = (struct qdio_irq*)q->irq_ptr;
- if (!irq->is_qebsm) {
- slsb = (char *)&q->slsb.acc.val[(*bufno)];
- xchg(slsb, state);
- return 1;
- }
- return qdio_do_sqbs(q, state, bufno, count);
-}
-
-#ifdef CONFIG_QDIO_DEBUG
-static inline void
-qdio_trace_slsb(struct qdio_q *q)
-{
- if (q->queue_type==QDIO_TRACE_QTYPE) {
- if (q->is_input_q)
- QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
- QDIO_MAX_BUFFERS_PER_Q);
- else
- QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
- QDIO_MAX_BUFFERS_PER_Q);
- }
-}
-#endif
-
-static inline int
-set_slsb(struct qdio_q *q, unsigned int *bufno,
- unsigned char state, unsigned int *count)
-{
- int rc;
-#ifdef CONFIG_QDIO_DEBUG
- qdio_trace_slsb(q);
-#endif
- rc = qdio_set_slsb(q, bufno, state, count);
-#ifdef CONFIG_QDIO_DEBUG
- qdio_trace_slsb(q);
-#endif
- return rc;
-}
-static inline int
-qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
- unsigned int gpr3)
-{
- int cc;
-
- QDIO_DBF_TEXT4(0,trace,"sigasync");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- qdio_perf_stat_inc(&perf_stats.siga_syncs);
-
- cc = do_siga_sync(q->schid, gpr2, gpr3);
- if (cc)
- QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
-
- return cc;
-}
-
-static inline int
-qdio_siga_sync_q(struct qdio_q *q)
-{
- if (q->is_input_q)
- return qdio_siga_sync(q, 0, q->mask);
- return qdio_siga_sync(q, q->mask, 0);
-}
-
-static int
-__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
-{
- struct qdio_irq *irq;
- unsigned int fc = 0;
- unsigned long schid;
-
- irq = (struct qdio_irq *) q->irq_ptr;
- if (!irq->is_qebsm)
- schid = *((u32 *)&q->schid);
- else {
- schid = irq->sch_token;
- fc |= 0x80;
- }
- return do_siga_output(schid, q->mask, busy_bit, fc);
-}
-
-/*
- * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
- * an access exception
- */
-static int
-qdio_siga_output(struct qdio_q *q)
-{
- int cc;
- __u32 busy_bit;
- __u64 start_time=0;
-
- qdio_perf_stat_inc(&perf_stats.siga_outs);
-
- QDIO_DBF_TEXT4(0,trace,"sigaout");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- for (;;) {
- cc = __do_siga_output(q, &busy_bit);
-//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
- if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
- if (!start_time)
- start_time=NOW;
- if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
- break;
- } else
- break;
- }
-
- if ((cc==2) && (busy_bit))
- cc |= QDIO_SIGA_ERROR_B_BIT_SET;
-
- if (cc)
- QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
-
- return cc;
-}
-
-static int
-qdio_siga_input(struct qdio_q *q)
-{
- int cc;
-
- QDIO_DBF_TEXT4(0,trace,"sigain");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- qdio_perf_stat_inc(&perf_stats.siga_ins);
-
- cc = do_siga_input(q->schid, q->mask);
-
- if (cc)
- QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
-
- return cc;
-}
-
-/* locked by the locks in qdio_activate and qdio_cleanup */
-static __u32 *
-qdio_get_indicator(void)
-{
- int i;
-
- for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
- if (!indicator_used[i]) {
- indicator_used[i]=1;
- return indicators+i;
- }
- atomic_inc(&spare_indicator_usecount);
- return (__u32 * volatile) &spare_indicator;
-}
-
-/* locked by the locks in qdio_activate and qdio_cleanup */
-static void
-qdio_put_indicator(__u32 *addr)
-{
- int i;
-
- if ( (addr) && (addr!=&spare_indicator) ) {
- i=addr-indicators;
- indicator_used[i]=0;
- }
- if (addr == &spare_indicator)
- atomic_dec(&spare_indicator_usecount);
-}
-
-static inline void
-tiqdio_clear_summary_bit(__u32 *location)
-{
- QDIO_DBF_TEXT5(0,trace,"clrsummb");
- QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
-
- xchg(location,0);
-}
-
-static inline void
-tiqdio_set_summary_bit(__u32 *location)
-{
- QDIO_DBF_TEXT5(0,trace,"setsummb");
- QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
-
- xchg(location,-1);
-}
-
-static inline void
-tiqdio_sched_tl(void)
-{
- tasklet_hi_schedule(&tiqdio_tasklet);
-}
-
-static void
-qdio_mark_tiq(struct qdio_q *q)
-{
- unsigned long flags;
-
- QDIO_DBF_TEXT4(0,trace,"mark iq");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- spin_lock_irqsave(&ttiq_list_lock,flags);
- if (unlikely(atomic_read(&q->is_in_shutdown)))
- goto out_unlock;
-
- if (!q->is_input_q)
- goto out_unlock;
-
- if ((q->list_prev) || (q->list_next))
- goto out_unlock;
-
- if (!tiq_list) {
- tiq_list=q;
- q->list_prev=q;
- q->list_next=q;
- } else {
- q->list_next=tiq_list;
- q->list_prev=tiq_list->list_prev;
- tiq_list->list_prev->list_next=q;
- tiq_list->list_prev=q;
- }
- spin_unlock_irqrestore(&ttiq_list_lock,flags);
-
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- tiqdio_sched_tl();
- return;
-out_unlock:
- spin_unlock_irqrestore(&ttiq_list_lock,flags);
- return;
-}
-
-static inline void
-qdio_mark_q(struct qdio_q *q)
-{
- QDIO_DBF_TEXT4(0,trace,"mark q");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- if (unlikely(atomic_read(&q->is_in_shutdown)))
- return;
-
- tasklet_schedule(&q->tasklet);
-}
-
-static int
-qdio_stop_polling(struct qdio_q *q)
-{
-#ifdef QDIO_USE_PROCESSING_STATE
- unsigned int tmp, gsf, count = 1;
- unsigned char state = 0;
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
-
- if (!atomic_xchg(&q->polling,0))
- return 1;
-
- QDIO_DBF_TEXT4(0,trace,"stoppoll");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- /* show the card that we are not polling anymore */
- if (!q->is_input_q)
- return 1;
-
- tmp = gsf = GET_SAVED_FRONTIER(q);
- tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
- set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
-
- /*
- * we don't issue this SYNC_MEMORY, as we trust Rick T and
- * moreover will not use the PROCESSING state under VM, so
- * q->polling was 0 anyway
- */
- /*SYNC_MEMORY;*/
- if (irq->is_qebsm) {
- count = 1;
- qdio_do_eqbs(q, &state, &gsf, &count);
- } else
- state = q->slsb.acc.val[gsf];
- if (state != SLSB_P_INPUT_PRIMED)
- return 1;
- /*
- * set our summary bit again, as otherwise there is a
- * small window we can miss between resetting it and
- * checking for PRIMED state
- */
- if (q->is_thinint_q)
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- return 0;
-
-#else /* QDIO_USE_PROCESSING_STATE */
- return 1;
-#endif /* QDIO_USE_PROCESSING_STATE */
-}
-
-/*
- * see the comment in do_QDIO and before qdio_reserve_q about the
- * sophisticated locking outside of unmark_q, so that we don't need to
- * disable the interrupts :-)
-*/
-static void
-qdio_unmark_q(struct qdio_q *q)
-{
- unsigned long flags;
-
- QDIO_DBF_TEXT4(0,trace,"unmark q");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- if ((!q->list_prev)||(!q->list_next))
- return;
-
- if ((q->is_thinint_q)&&(q->is_input_q)) {
- /* iQDIO */
- spin_lock_irqsave(&ttiq_list_lock,flags);
- /* in case cleanup has done this already and simultanously
- * qdio_unmark_q is called from the interrupt handler, we've
- * got to check this in this specific case again */
- if ((!q->list_prev)||(!q->list_next))
- goto out;
- if (q->list_next==q) {
- /* q was the only interesting q */
- tiq_list=NULL;
- q->list_next=NULL;
- q->list_prev=NULL;
- } else {
- q->list_next->list_prev=q->list_prev;
- q->list_prev->list_next=q->list_next;
- tiq_list=q->list_next;
- q->list_next=NULL;
- q->list_prev=NULL;
- }
-out:
- spin_unlock_irqrestore(&ttiq_list_lock,flags);
- }
-}
-
-static inline unsigned long
-tiqdio_clear_global_summary(void)
-{
- unsigned long time;
-
- QDIO_DBF_TEXT5(0,trace,"clrglobl");
-
- time = do_clear_global_summary();
-
- QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
-
- return time;
-}
-
-
-/************************* OUTBOUND ROUTINES *******************************/
-static int
-qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
-{
- struct qdio_irq *irq;
- unsigned char state;
- unsigned int cnt, count, ftc;
-
- irq = (struct qdio_irq *) q->irq_ptr;
- if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
- SYNC_MEMORY;
-
- ftc = q->first_to_check;
- count = qdio_min(atomic_read(&q->number_of_buffers_used),
- (QDIO_MAX_BUFFERS_PER_Q-1));
- if (count == 0)
- return q->first_to_check;
- cnt = qdio_do_eqbs(q, &state, &ftc, &count);
- if (cnt == 0)
- return q->first_to_check;
- switch (state) {
- case SLSB_P_OUTPUT_ERROR:
- QDIO_DBF_TEXT3(0,trace,"outperr");
- atomic_sub(cnt , &q->number_of_buffers_used);
- if (q->qdio_error)
- q->error_status_flags |=
- QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
- q->qdio_error = SLSB_P_OUTPUT_ERROR;
- q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
- q->first_to_check = ftc;
- break;
- case SLSB_P_OUTPUT_EMPTY:
- QDIO_DBF_TEXT5(0,trace,"outpempt");
- atomic_sub(cnt, &q->number_of_buffers_used);
- q->first_to_check = ftc;
- break;
- case SLSB_CU_OUTPUT_PRIMED:
- /* all buffers primed */
- QDIO_DBF_TEXT5(0,trace,"outpprim");
- break;
- default:
- break;
- }
- QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
- return q->first_to_check;
-}
-
-static int
-qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
-{
- struct qdio_irq *irq;
- unsigned char state;
- int tmp, ftc, count, cnt;
- char dbf_text[15];
-
-
- irq = (struct qdio_irq *) q->irq_ptr;
- ftc = q->first_to_check;
- count = qdio_min(atomic_read(&q->number_of_buffers_used),
- (QDIO_MAX_BUFFERS_PER_Q-1));
- if (count == 0)
- return q->first_to_check;
- cnt = qdio_do_eqbs(q, &state, &ftc, &count);
- if (cnt == 0)
- return q->first_to_check;
- switch (state) {
- case SLSB_P_INPUT_ERROR :
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT3(1,trace,"inperr");
- sprintf(dbf_text,"%2x,%2x",ftc,count);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- if (q->qdio_error)
- q->error_status_flags |=
- QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
- q->qdio_error = SLSB_P_INPUT_ERROR;
- q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
- atomic_sub(cnt, &q->number_of_buffers_used);
- q->first_to_check = ftc;
- break;
- case SLSB_P_INPUT_PRIMED :
- QDIO_DBF_TEXT3(0,trace,"inptprim");
- sprintf(dbf_text,"%2x,%2x",ftc,count);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
- tmp = 0;
- ftc = q->first_to_check;
-#ifdef QDIO_USE_PROCESSING_STATE
- if (cnt > 1) {
- cnt -= 1;
- tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
- if (!tmp)
- break;
- }
- cnt = 1;
- tmp += set_slsb(q, &ftc,
- SLSB_P_INPUT_PROCESSING, &cnt);
- atomic_set(&q->polling, 1);
-#else
- tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
-#endif
- atomic_sub(tmp, &q->number_of_buffers_used);
- q->first_to_check = ftc;
- break;
- case SLSB_CU_INPUT_EMPTY:
- case SLSB_P_INPUT_NOT_INIT:
- case SLSB_P_INPUT_PROCESSING:
- QDIO_DBF_TEXT5(0,trace,"inpnipro");
- break;
- default:
- break;
- }
- QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
- return q->first_to_check;
-}
-
-static int
-qdio_get_outbound_buffer_frontier(struct qdio_q *q)
-{
- struct qdio_irq *irq;
- volatile char *slsb;
- unsigned int count = 1;
- int first_not_to_check, f, f_mod_no;
- char dbf_text[15];
-
- QDIO_DBF_TEXT4(0,trace,"getobfro");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- irq = (struct qdio_irq *) q->irq_ptr;
- if (irq->is_qebsm)
- return qdio_qebsm_get_outbound_buffer_frontier(q);
-
- slsb=&q->slsb.acc.val[0];
- f_mod_no=f=q->first_to_check;
- /*
- * f points to already processed elements, so f+no_used is correct...
- * ... but: we don't check 128 buffers, as otherwise
- * qdio_has_outbound_q_moved would return 0
- */
- first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
- (QDIO_MAX_BUFFERS_PER_Q-1));
-
- if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
- (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
- SYNC_MEMORY;
-
-check_next:
- if (f==first_not_to_check)
- goto out;
-
- switch(slsb[f_mod_no]) {
-
- /* the adapter has not fetched the output yet */
- case SLSB_CU_OUTPUT_PRIMED:
- QDIO_DBF_TEXT5(0,trace,"outpprim");
- break;
-
- /* the adapter got it */
- case SLSB_P_OUTPUT_EMPTY:
- atomic_dec(&q->number_of_buffers_used);
- f++;
- f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
- QDIO_DBF_TEXT5(0,trace,"outpempt");
- goto check_next;
-
- case SLSB_P_OUTPUT_ERROR:
- QDIO_DBF_TEXT3(0,trace,"outperr");
- sprintf(dbf_text,"%x-%x-%x",f_mod_no,
- q->sbal[f_mod_no]->element[14].sbalf.value,
- q->sbal[f_mod_no]->element[15].sbalf.value);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
- QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
-
- /* kind of process the buffer */
- set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
-
- /*
- * we increment the frontier, as this buffer
- * was processed obviously
- */
- atomic_dec(&q->number_of_buffers_used);
- f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
-
- if (q->qdio_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
- q->qdio_error=SLSB_P_OUTPUT_ERROR;
- q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
-
- break;
-
- /* no new buffers */
- default:
- QDIO_DBF_TEXT5(0,trace,"outpni");
- }
-out:
- return (q->first_to_check=f_mod_no);
-}
-
-/* all buffers are processed */
-static int
-qdio_is_outbound_q_done(struct qdio_q *q)
-{
- int no_used;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
-
- no_used=atomic_read(&q->number_of_buffers_used);
-
-#ifdef CONFIG_QDIO_DEBUG
- if (no_used) {
- sprintf(dbf_text,"oqisnt%02x",no_used);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
- } else {
- QDIO_DBF_TEXT4(0,trace,"oqisdone");
- }
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
- return (no_used==0);
-}
-
-static int
-qdio_has_outbound_q_moved(struct qdio_q *q)
-{
- int i;
-
- i=qdio_get_outbound_buffer_frontier(q);
-
- if ( (i!=GET_SAVED_FRONTIER(q)) ||
- (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
- SAVE_FRONTIER(q,i);
- QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 1;
- } else {
- QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 0;
- }
-}
-
-static void
-qdio_kick_outbound_q(struct qdio_q *q)
-{
- int result;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-
- QDIO_DBF_TEXT4(0,trace,"kickoutq");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (!q->siga_out)
- return;
-
- /* here's the story with cc=2 and busy bit set (thanks, Rick):
- * VM's CP could present us cc=2 and busy bit set on SIGA-write
- * during reconfiguration of their Guest LAN (only in HIPERS mode,
- * QDIO mode is asynchronous -- cc=2 and busy bit there will take
- * the queues down immediately; and not being under VM we have a
- * problem on cc=2 and busy bit set right away).
- *
- * Therefore qdio_siga_output will try for a short time constantly,
- * if such a condition occurs. If it doesn't change, it will
- * increase the busy_siga_counter and save the timestamp, and
- * schedule the queue for later processing (via mark_q, using the
- * queue tasklet). __qdio_outbound_processing will check out the
- * counter. If non-zero, it will call qdio_kick_outbound_q as often
- * as the value of the counter. This will attempt further SIGA
- * instructions. For each successful SIGA, the counter is
- * decreased, for failing SIGAs the counter remains the same, after
- * all.
- * After some time of no movement, qdio_kick_outbound_q will
- * finally fail and reflect corresponding error codes to call
- * the upper layer module and have it take the queues down.
- *
- * Note that this is a change from the original HiperSockets design
- * (saying cc=2 and busy bit means take the queues down), but in
- * these days Guest LAN didn't exist... excessive cc=2 with busy bit
- * conditions will still take the queues down, but the threshold is
- * higher due to the Guest LAN environment.
- */
-
-
- result=qdio_siga_output(q);
-
- switch (result) {
- case 0:
- /* went smooth this time, reset timestamp */
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT3(0,trace,"cc2reslv");
- sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- q->timing.busy_start=0;
- break;
- case (2|QDIO_SIGA_ERROR_B_BIT_SET):
- /* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
-
- /* if the last siga was successful, save
- * timestamp here */
- if (!q->timing.busy_start)
- q->timing.busy_start=NOW;
-
- /* if we're in time, don't touch error_status_flags
- * and siga_error */
- if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
- qdio_mark_q(q);
- break;
- }
- QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- /* else fallthrough and report error */
- default:
- /* for plain cc=1, 2 or 3: */
- if (q->siga_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
- q->error_status_flags|=
- QDIO_STATUS_LOOK_FOR_ERROR;
- q->siga_error=result;
- }
-}
-
-static void
-qdio_kick_outbound_handler(struct qdio_q *q)
-{
- int start, end, real_end, count;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
-
- start = q->first_element_to_kick;
- /* last_move_ftc was just updated */
- real_end = GET_SAVED_FRONTIER(q);
- end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
- (QDIO_MAX_BUFFERS_PER_Q-1);
- count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
- (QDIO_MAX_BUFFERS_PER_Q-1);
-
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0,trace,"kickouth");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- sprintf(dbf_text,"s=%2xc=%2x",start,count);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (q->state==QDIO_IRQ_STATE_ACTIVE)
- q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
- q->error_status_flags,
- q->qdio_error,q->siga_error,q->q_no,start,count,
- q->int_parm);
-
- /* for the next time: */
- q->first_element_to_kick=real_end;
- q->qdio_error=0;
- q->siga_error=0;
- q->error_status_flags=0;
-}
-
-static void
-__qdio_outbound_processing(struct qdio_q *q)
-{
- int siga_attempts;
-
- QDIO_DBF_TEXT4(0,trace,"qoutproc");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- if (unlikely(qdio_reserve_q(q))) {
- qdio_release_q(q);
- qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
- /* as we're sissies, we'll check next time */
- if (likely(!atomic_read(&q->is_in_shutdown))) {
- qdio_mark_q(q);
- QDIO_DBF_TEXT4(0,trace,"busy,agn");
- }
- return;
- }
- qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
- qdio_perf_stat_inc(&perf_stats.tl_runs);
-
- /* see comment in qdio_kick_outbound_q */
- siga_attempts=atomic_read(&q->busy_siga_counter);
- while (siga_attempts) {
- atomic_dec(&q->busy_siga_counter);
- qdio_kick_outbound_q(q);
- siga_attempts--;
- }
-
- if (qdio_has_outbound_q_moved(q))
- qdio_kick_outbound_handler(q);
-
- if (q->queue_type == QDIO_ZFCP_QFMT) {
- if ((!q->hydra_gives_outbound_pcis) &&
- (!qdio_is_outbound_q_done(q)))
- qdio_mark_q(q);
- }
- else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
- (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
- /*
- * make sure buffer switch from PRIMED to EMPTY is noticed
- * and outbound_handler is called
- */
- if (qdio_is_outbound_q_done(q)) {
- del_timer(&q->timer);
- } else {
- if (!timer_pending(&q->timer))
- mod_timer(&q->timer, jiffies +
- QDIO_FORCE_CHECK_TIMEOUT);
- }
- }
-
- qdio_release_q(q);
-}
-
-static void
-qdio_outbound_processing(unsigned long q)
-{
- __qdio_outbound_processing((struct qdio_q *) q);
-}
-
-/************************* INBOUND ROUTINES *******************************/
-
-
-static int
-qdio_get_inbound_buffer_frontier(struct qdio_q *q)
-{
- struct qdio_irq *irq;
- int f,f_mod_no;
- volatile char *slsb;
- unsigned int count = 1;
- int first_not_to_check;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif /* CONFIG_QDIO_DEBUG */
-#ifdef QDIO_USE_PROCESSING_STATE
- int last_position=-1;
-#endif /* QDIO_USE_PROCESSING_STATE */
-
- QDIO_DBF_TEXT4(0,trace,"getibfro");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- irq = (struct qdio_irq *) q->irq_ptr;
- if (irq->is_qebsm)
- return qdio_qebsm_get_inbound_buffer_frontier(q);
-
- slsb=&q->slsb.acc.val[0];
- f_mod_no=f=q->first_to_check;
- /*
- * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
- * would return 0
- */
- first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
- (QDIO_MAX_BUFFERS_PER_Q-1));
-
- /*
- * we don't use this one, as a PCI or we after a thin interrupt
- * will sync the queues
- */
- /* SYNC_MEMORY;*/
-
-check_next:
- f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
- if (f==first_not_to_check)
- goto out;
- switch (slsb[f_mod_no]) {
-
- /* CU_EMPTY means frontier is reached */
- case SLSB_CU_INPUT_EMPTY:
- QDIO_DBF_TEXT5(0,trace,"inptempt");
- break;
-
- /* P_PRIMED means set slsb to P_PROCESSING and move on */
- case SLSB_P_INPUT_PRIMED:
- QDIO_DBF_TEXT5(0,trace,"inptprim");
-
-#ifdef QDIO_USE_PROCESSING_STATE
- /*
- * as soon as running under VM, polling the input queues will
- * kill VM in terms of CP overhead
- */
- if (q->siga_sync) {
- set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
- } else {
- /* set the previous buffer to NOT_INIT. The current
- * buffer will be set to PROCESSING at the end of
- * this function to avoid further interrupts. */
- if (last_position>=0)
- set_slsb(q, &last_position,
- SLSB_P_INPUT_NOT_INIT, &count);
- atomic_set(&q->polling,1);
- last_position=f_mod_no;
- }
-#else /* QDIO_USE_PROCESSING_STATE */
- set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
-#endif /* QDIO_USE_PROCESSING_STATE */
- /*
- * not needed, as the inbound queue will be synced on the next
- * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
- */
- /*SYNC_MEMORY;*/
- f++;
- atomic_dec(&q->number_of_buffers_used);
- goto check_next;
-
- case SLSB_P_INPUT_NOT_INIT:
- case SLSB_P_INPUT_PROCESSING:
- QDIO_DBF_TEXT5(0,trace,"inpnipro");
- break;
-
- /* P_ERROR means frontier is reached, break and report error */
- case SLSB_P_INPUT_ERROR:
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text,"inperr%2x",f_mod_no);
- QDIO_DBF_TEXT3(1,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
-
- /* kind of process the buffer */
- set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
-
- if (q->qdio_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
- q->qdio_error=SLSB_P_INPUT_ERROR;
- q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
-
- /* we increment the frontier, as this buffer
- * was processed obviously */
- f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
- atomic_dec(&q->number_of_buffers_used);
-
-#ifdef QDIO_USE_PROCESSING_STATE
- last_position=-1;
-#endif /* QDIO_USE_PROCESSING_STATE */
-
- break;
-
- /* everything else means frontier not changed (HALTED or so) */
- default:
- break;
- }
-out:
- q->first_to_check=f_mod_no;
-
-#ifdef QDIO_USE_PROCESSING_STATE
- if (last_position>=0)
- set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
-#endif /* QDIO_USE_PROCESSING_STATE */
-
- QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
-
- return q->first_to_check;
-}
-
-static int
-qdio_has_inbound_q_moved(struct qdio_q *q)
-{
- int i;
-
- i=qdio_get_inbound_buffer_frontier(q);
- if ( (i!=GET_SAVED_FRONTIER(q)) ||
- (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
- SAVE_FRONTIER(q,i);
- if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
- SAVE_TIMESTAMP(q);
-
- QDIO_DBF_TEXT4(0,trace,"inhasmvd");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 1;
- } else {
- QDIO_DBF_TEXT4(0,trace,"inhsntmv");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 0;
- }
-}
-
-/* means, no more buffers to be filled */
-static int
-tiqdio_is_inbound_q_done(struct qdio_q *q)
-{
- int no_used;
- unsigned int start_buf, count;
- unsigned char state = 0;
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
-
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
-
- no_used=atomic_read(&q->number_of_buffers_used);
-
- /* propagate the change from 82 to 80 through VM */
- SYNC_MEMORY;
-
-#ifdef CONFIG_QDIO_DEBUG
- if (no_used) {
- sprintf(dbf_text,"iqisnt%02x",no_used);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
- } else {
- QDIO_DBF_TEXT4(0,trace,"iniqisdo");
- }
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (!no_used)
- return 1;
- if (irq->is_qebsm) {
- count = 1;
- start_buf = q->first_to_check;
- qdio_do_eqbs(q, &state, &start_buf, &count);
- } else
- state = q->slsb.acc.val[q->first_to_check];
- if (state != SLSB_P_INPUT_PRIMED)
- /*
- * nothing more to do, if next buffer is not PRIMED.
- * note that we did a SYNC_MEMORY before, that there
- * has been a sychnronization.
- * we will return 0 below, as there is nothing to do
- * (stop_polling not necessary, as we have not been
- * using the PROCESSING state
- */
- return 0;
-
- /*
- * ok, the next input buffer is primed. that means, that device state
- * change indicator and adapter local summary are set, so we will find
- * it next time.
- * we will return 0 below, as there is nothing to do, except scheduling
- * ourselves for the next time.
- */
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- tiqdio_sched_tl();
- return 0;
-}
-
-static int
-qdio_is_inbound_q_done(struct qdio_q *q)
-{
- int no_used;
- unsigned int start_buf, count;
- unsigned char state = 0;
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
-
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
-
- no_used=atomic_read(&q->number_of_buffers_used);
-
- /*
- * we need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance
- */
- SYNC_MEMORY;
-
- if (!no_used) {
- QDIO_DBF_TEXT4(0,trace,"inqisdnA");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 1;
- }
- if (irq->is_qebsm) {
- count = 1;
- start_buf = q->first_to_check;
- qdio_do_eqbs(q, &state, &start_buf, &count);
- } else
- state = q->slsb.acc.val[q->first_to_check];
- if (state == SLSB_P_INPUT_PRIMED) {
- /* we got something to do */
- QDIO_DBF_TEXT4(0,trace,"inqisntA");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- return 0;
- }
-
- /* on VM, we don't poll, so the q is always done here */
- if (q->siga_sync)
- return 1;
- if (q->hydra_gives_outbound_pcis)
- return 1;
-
- /*
- * at this point we know, that inbound first_to_check
- * has (probably) not moved (see qdio_inbound_processing)
- */
- if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0,trace,"inqisdon");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- return 1;
- } else {
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0,trace,"inqisntd");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
- sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- return 0;
- }
-}
-
-static void
-qdio_kick_inbound_handler(struct qdio_q *q)
-{
- int count, start, end, real_end, i;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
-
- QDIO_DBF_TEXT4(0,trace,"kickinh");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- start=q->first_element_to_kick;
- real_end=q->first_to_check;
- end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
-
- i=start;
- count=0;
- while (1) {
- count++;
- if (i==end)
- break;
- i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
- }
-
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text,"s=%2xc=%2x",start,count);
- QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
- q->handler(q->cdev,
- QDIO_STATUS_INBOUND_INT|q->error_status_flags,
- q->qdio_error,q->siga_error,q->q_no,start,count,
- q->int_parm);
-
- /* for the next time: */
- q->first_element_to_kick=real_end;
- q->qdio_error=0;
- q->siga_error=0;
- q->error_status_flags=0;
-
- qdio_perf_stat_inc(&perf_stats.inbound_cnt);
-}
-
-static void
-__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
-{
- struct qdio_irq *irq_ptr;
- struct qdio_q *oq;
- int i;
-
- QDIO_DBF_TEXT4(0,trace,"iqinproc");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- /*
- * we first want to reserve the q, so that we know, that we don't
- * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
- * be set
- */
- if (unlikely(qdio_reserve_q(q))) {
- qdio_release_q(q);
- qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
- /*
- * as we might just be about to stop polling, we make
- * sure that we check again at least once more
- */
- tiqdio_sched_tl();
- return;
- }
- qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
- if (unlikely(atomic_read(&q->is_in_shutdown))) {
- qdio_unmark_q(q);
- goto out;
- }
-
- /*
- * we reset spare_ind_was_set, when the queue does not use the
- * spare indicator
- */
- if (spare_ind_was_set)
- spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
-
- if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
- goto out;
- /*
- * q->dev_st_chg_ind is the indicator, be it shared or not.
- * only clear it, if indicator is non-shared
- */
- if (q->dev_st_chg_ind != &spare_indicator)
- tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
-
- if (q->hydra_gives_outbound_pcis) {
- if (!q->siga_sync_done_on_thinints) {
- SYNC_MEMORY_ALL;
- } else if (!q->siga_sync_done_on_outb_tis) {
- SYNC_MEMORY_ALL_OUTB;
- }
- } else {
- SYNC_MEMORY;
- }
- /*
- * maybe we have to do work on our outbound queues... at least
- * we have to check the outbound-int-capable thinint-capable
- * queues
- */
- if (q->hydra_gives_outbound_pcis) {
- irq_ptr = (struct qdio_irq*)q->irq_ptr;
- for (i=0;i<irq_ptr->no_output_qs;i++) {
- oq = irq_ptr->output_qs[i];
- if (!qdio_is_outbound_q_done(oq)) {
- qdio_perf_stat_dec(&perf_stats.tl_runs);
- __qdio_outbound_processing(oq);
- }
- }
- }
-
- if (!qdio_has_inbound_q_moved(q))
- goto out;
-
- qdio_kick_inbound_handler(q);
- if (tiqdio_is_inbound_q_done(q))
- if (!qdio_stop_polling(q)) {
- /*
- * we set the flags to get into the stuff next time,
- * see also comment in qdio_stop_polling
- */
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- tiqdio_sched_tl();
- }
-out:
- qdio_release_q(q);
-}
-
-static void
-tiqdio_inbound_processing(unsigned long q)
-{
- __tiqdio_inbound_processing((struct qdio_q *) q,
- atomic_read(&spare_indicator_usecount));
-}
-
-static void
-__qdio_inbound_processing(struct qdio_q *q)
-{
- int q_laps=0;
-
- QDIO_DBF_TEXT4(0,trace,"qinproc");
- QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-
- if (unlikely(qdio_reserve_q(q))) {
- qdio_release_q(q);
- qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
- /* as we're sissies, we'll check next time */
- if (likely(!atomic_read(&q->is_in_shutdown))) {
- qdio_mark_q(q);
- QDIO_DBF_TEXT4(0,trace,"busy,agn");
- }
- return;
- }
- qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
- qdio_perf_stat_inc(&perf_stats.tl_runs);
-
-again:
- if (qdio_has_inbound_q_moved(q)) {
- qdio_kick_inbound_handler(q);
- if (!qdio_stop_polling(q)) {
- q_laps++;
- if (q_laps<QDIO_Q_LAPS)
- goto again;
- }
- qdio_mark_q(q);
- } else {
- if (!qdio_is_inbound_q_done(q))
- /* means poll time is not yet over */
- qdio_mark_q(q);
- }
-
- qdio_release_q(q);
-}
-
-static void
-qdio_inbound_processing(unsigned long q)
-{
- __qdio_inbound_processing((struct qdio_q *) q);
-}
-
-/************************* MAIN ROUTINES *******************************/
-
-#ifdef QDIO_USE_PROCESSING_STATE
-static int
-tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
-{
- if (!q) {
- tiqdio_sched_tl();
- return 0;
- }
-
- /*
- * under VM, we have not used the PROCESSING state, so no
- * need to stop polling
- */
- if (q->siga_sync)
- return 2;
-
- if (unlikely(qdio_reserve_q(q))) {
- qdio_release_q(q);
- qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
- /*
- * as we might just be about to stop polling, we make
- * sure that we check again at least once more
- */
-
- /*
- * sanity -- we'd get here without setting the
- * dev st chg ind
- */
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- tiqdio_sched_tl();
- return 0;
- }
- if (qdio_stop_polling(q)) {
- qdio_release_q(q);
- return 2;
- }
- if (q_laps<QDIO_Q_LAPS-1) {
- qdio_release_q(q);
- return 3;
- }
- /*
- * we set the flags to get into the stuff
- * next time, see also comment in qdio_stop_polling
- */
- tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
- tiqdio_sched_tl();
- qdio_release_q(q);
- return 1;
-
-}
-#endif /* QDIO_USE_PROCESSING_STATE */
-
-static void
-tiqdio_inbound_checks(void)
-{
- struct qdio_q *q;
- int spare_ind_was_set=0;
-#ifdef QDIO_USE_PROCESSING_STATE
- int q_laps=0;
-#endif /* QDIO_USE_PROCESSING_STATE */
-
- QDIO_DBF_TEXT4(0,trace,"iqdinbck");
- QDIO_DBF_TEXT5(0,trace,"iqlocsum");
-
-#ifdef QDIO_USE_PROCESSING_STATE
-again:
-#endif /* QDIO_USE_PROCESSING_STATE */
-
- /* when the spare indicator is used and set, save that and clear it */
- if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
- spare_ind_was_set = 1;
- tiqdio_clear_summary_bit((__u32*)&spare_indicator);
- }
-
- q=(struct qdio_q*)tiq_list;
- do {
- if (!q)
- break;
- __tiqdio_inbound_processing(q, spare_ind_was_set);
- q=(struct qdio_q*)q->list_next;
- } while (q!=(struct qdio_q*)tiq_list);
-
-#ifdef QDIO_USE_PROCESSING_STATE
- q=(struct qdio_q*)tiq_list;
- do {
- int ret;
-
- ret = tiqdio_reset_processing_state(q, q_laps);
- switch (ret) {
- case 0:
- return;
- case 1:
- q_laps++;
- case 2:
- q = (struct qdio_q*)q->list_next;
- break;
- default:
- q_laps++;
- goto again;
- }
- } while (q!=(struct qdio_q*)tiq_list);
-#endif /* QDIO_USE_PROCESSING_STATE */
-}
-
-static void
-tiqdio_tl(unsigned long data)
-{
- QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
-
- qdio_perf_stat_inc(&perf_stats.tl_runs);
-
- tiqdio_inbound_checks();
-}
-
-/********************* GENERAL HELPER_ROUTINES ***********************/
-
-static void
-qdio_release_irq_memory(struct qdio_irq *irq_ptr)
-{
- int i;
- struct qdio_q *q;
-
- for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
- q = irq_ptr->input_qs[i];
- if (q) {
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
- }
- q = irq_ptr->output_qs[i];
- if (q) {
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
- }
- }
- free_page((unsigned long) irq_ptr->qdr);
- free_page((unsigned long) irq_ptr);
-}
-
-static void
-qdio_set_impl_params(struct qdio_irq *irq_ptr,
- unsigned int qib_param_field_format,
- /* pointer to 128 bytes or NULL, if no param field */
- unsigned char *qib_param_field,
- /* pointer to no_queues*128 words of data or NULL */
- unsigned int no_input_qs,
- unsigned int no_output_qs,
- unsigned long *input_slib_elements,
- unsigned long *output_slib_elements)
-{
- int i,j;
-
- if (!irq_ptr)
- return;
-
- irq_ptr->qib.pfmt=qib_param_field_format;
- if (qib_param_field)
- memcpy(irq_ptr->qib.parm,qib_param_field,
- QDIO_MAX_BUFFERS_PER_Q);
-
- if (input_slib_elements)
- for (i=0;i<no_input_qs;i++) {
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- irq_ptr->input_qs[i]->slib->slibe[j].parms=
- input_slib_elements[
- i*QDIO_MAX_BUFFERS_PER_Q+j];
- }
- if (output_slib_elements)
- for (i=0;i<no_output_qs;i++) {
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- irq_ptr->output_qs[i]->slib->slibe[j].parms=
- output_slib_elements[
- i*QDIO_MAX_BUFFERS_PER_Q+j];
- }
-}
-
-static int
-qdio_alloc_qs(struct qdio_irq *irq_ptr,
- int no_input_qs, int no_output_qs)
-{
- int i;
- struct qdio_q *q;
-
- for (i = 0; i < no_input_qs; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
- if (!q)
- return -ENOMEM;
- memset(q, 0, sizeof(*q));
-
- q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
- if (!q->slib) {
- kmem_cache_free(qdio_q_cache, q);
- return -ENOMEM;
- }
- irq_ptr->input_qs[i]=q;
- }
-
- for (i = 0; i < no_output_qs; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
- if (!q)
- return -ENOMEM;
- memset(q, 0, sizeof(*q));
-
- q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
- if (!q->slib) {
- kmem_cache_free(qdio_q_cache, q);
- return -ENOMEM;
- }
- irq_ptr->output_qs[i]=q;
- }
- return 0;
-}
-
-static void
-qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
- int no_input_qs, int no_output_qs,
- qdio_handler_t *input_handler,
- qdio_handler_t *output_handler,
- unsigned long int_parm,int q_format,
- unsigned long flags,
- void **inbound_sbals_array,
- void **outbound_sbals_array)
-{
- struct qdio_q *q;
- int i,j;
- char dbf_text[20]; /* see qdio_initialize */
- void *ptr;
- int available;
-
- sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- for (i=0;i<no_input_qs;i++) {
- q=irq_ptr->input_qs[i];
-
- memset(q,0,((char*)&q->slib)-((char*)q));
- sprintf(dbf_text,"in-q%4x",i);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
-
- memset(q->slib,0,PAGE_SIZE);
- q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
-
- available=0;
-
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- q->sbal[j]=*(inbound_sbals_array++);
-
- q->queue_type=q_format;
- q->int_parm=int_parm;
- q->schid = irq_ptr->schid;
- q->irq_ptr = irq_ptr;
- q->cdev = cdev;
- q->mask=1<<(31-i);
- q->q_no=i;
- q->is_input_q=1;
- q->first_to_check=0;
- q->last_move_ftc=0;
- q->handler=input_handler;
- q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
-
- /* q->is_thinint_q isn't valid at this time, but
- * irq_ptr->is_thinint_irq is
- */
- if (irq_ptr->is_thinint_irq)
- tasklet_init(&q->tasklet, tiqdio_inbound_processing,
- (unsigned long) q);
- else
- tasklet_init(&q->tasklet, qdio_inbound_processing,
- (unsigned long) q);
-
- /* actually this is not used for inbound queues. yet. */
- atomic_set(&q->busy_siga_counter,0);
- q->timing.busy_start=0;
-
-/* for (j=0;j<QDIO_STATS_NUMBER;j++)
- q->timing.last_transfer_times[j]=(qdio_get_micros()/
- QDIO_STATS_NUMBER)*j;
- q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
-*/
-
- /* fill in slib */
- if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
- (unsigned long)(q->slib);
- q->slib->sla=(unsigned long)(q->sl);
- q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
-
- /* fill in sl */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
-
- QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
- ptr=(void*)q->sl;
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
- ptr=(void*)&q->slsb;
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
- ptr=(void*)q->sbal[0];
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
-
- /* fill in slsb */
- if (!irq_ptr->is_qebsm) {
- unsigned int count = 1;
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
- }
- }
-
- for (i=0;i<no_output_qs;i++) {
- q=irq_ptr->output_qs[i];
- memset(q,0,((char*)&q->slib)-((char*)q));
-
- sprintf(dbf_text,"outq%4x",i);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
-
- memset(q->slib,0,PAGE_SIZE);
- q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
-
- available=0;
-
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- q->sbal[j]=*(outbound_sbals_array++);
-
- q->queue_type=q_format;
- if ((q->queue_type == QDIO_IQDIO_QFMT) &&
- (no_output_qs > 1) &&
- (i == no_output_qs-1))
- q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
- q->int_parm=int_parm;
- q->is_input_q=0;
- q->is_pci_out = 0;
- q->schid = irq_ptr->schid;
- q->cdev = cdev;
- q->irq_ptr = irq_ptr;
- q->mask=1<<(31-i);
- q->q_no=i;
- q->first_to_check=0;
- q->last_move_ftc=0;
- q->handler=output_handler;
-
- tasklet_init(&q->tasklet, qdio_outbound_processing,
- (unsigned long) q);
- setup_timer(&q->timer, qdio_outbound_processing,
- (unsigned long) q);
-
- atomic_set(&q->busy_siga_counter,0);
- q->timing.busy_start=0;
-
- /* fill in slib */
- if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
- (unsigned long)(q->slib);
- q->slib->sla=(unsigned long)(q->sl);
- q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
-
- /* fill in sl */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
- q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
-
- QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
- ptr=(void*)q->sl;
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
- ptr=(void*)&q->slsb;
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
- ptr=(void*)q->sbal[0];
- QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
-
- /* fill in slsb */
- if (!irq_ptr->is_qebsm) {
- unsigned int count = 1;
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
- }
- }
-}
-
-static void
-qdio_fill_thresholds(struct qdio_irq *irq_ptr,
- unsigned int no_input_qs,
- unsigned int no_output_qs,
- unsigned int min_input_threshold,
- unsigned int max_input_threshold,
- unsigned int min_output_threshold,
- unsigned int max_output_threshold)
-{
- int i;
- struct qdio_q *q;
-
- for (i=0;i<no_input_qs;i++) {
- q=irq_ptr->input_qs[i];
- q->timing.threshold=max_input_threshold;
-/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
- q->threshold_classes[j].threshold=
- min_input_threshold+
- (max_input_threshold-min_input_threshold)/
- QDIO_STATS_CLASSES;
- }
- qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
- }
- for (i=0;i<no_output_qs;i++) {
- q=irq_ptr->output_qs[i];
- q->timing.threshold=max_output_threshold;
-/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
- q->threshold_classes[j].threshold=
- min_output_threshold+
- (max_output_threshold-min_output_threshold)/
- QDIO_STATS_CLASSES;
- }
- qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
- }
-}
-
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
- QDIO_DBF_TEXT4(0,trace,"thin_int");
-
- qdio_perf_stat_inc(&perf_stats.thinints);
-
- /* SVS only when needed:
- * issue SVS to benefit from iqdio interrupt avoidance
- * (SVS clears AISOI)*/
- if (!omit_svs)
- tiqdio_clear_global_summary();
-
- tiqdio_inbound_checks();
-}
-
-static void
-qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
-{
- int i;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-
- QDIO_DBF_TEXT5(0,trace,"newstate");
- sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
- QDIO_DBF_TEXT5(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- irq_ptr->state=state;
- for (i=0;i<irq_ptr->no_input_qs;i++)
- irq_ptr->input_qs[i]->state=state;
- for (i=0;i<irq_ptr->no_output_qs;i++)
- irq_ptr->output_qs[i]->state=state;
- mb();
-}
-
-static void
-qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
-{
- char dbf_text[15];
-
- if (irb->esw.esw0.erw.cons) {
- sprintf(dbf_text,"sens%4x",schid.sch_no);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
-
- QDIO_PRINT_WARN("sense data available on qdio channel.\n");
- QDIO_HEXDUMP16(WARN,"irb: ",irb);
- QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
- }
-
-}
-
-static void
-qdio_handle_pci(struct qdio_irq *irq_ptr)
-{
- int i;
- struct qdio_q *q;
-
- qdio_perf_stat_inc(&perf_stats.pcis);
- for (i=0;i<irq_ptr->no_input_qs;i++) {
- q=irq_ptr->input_qs[i];
- if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
- qdio_mark_q(q);
- else {
- qdio_perf_stat_dec(&perf_stats.tl_runs);
- __qdio_inbound_processing(q);
- }
- }
- if (!irq_ptr->hydra_gives_outbound_pcis)
- return;
- for (i=0;i<irq_ptr->no_output_qs;i++) {
- q=irq_ptr->output_qs[i];
- if (qdio_is_outbound_q_done(q))
- continue;
- qdio_perf_stat_dec(&perf_stats.tl_runs);
- if (!irq_ptr->sync_done_on_outb_pcis)
- SYNC_MEMORY;
- __qdio_outbound_processing(q);
- }
-}
-
-static void qdio_establish_handle_irq(struct ccw_device*, int, int);
-
-static void
-qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
- int cstat, int dstat)
-{
- struct qdio_irq *irq_ptr;
- struct qdio_q *q;
- char dbf_text[15];
-
- irq_ptr = cdev->private->qdio_data;
-
- QDIO_DBF_TEXT2(1, trace, "ick2");
- sprintf(dbf_text,"%s", cdev->dev.bus_id);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
- QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
- QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
- QDIO_PRINT_ERR("received check condition on activate " \
- "queues on device %s (cs=x%x, ds=x%x).\n",
- cdev->dev.bus_id, cstat, dstat);
- if (irq_ptr->no_input_qs) {
- q=irq_ptr->input_qs[0];
- } else if (irq_ptr->no_output_qs) {
- q=irq_ptr->output_qs[0];
- } else {
- QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
- cdev->dev.bus_id);
- goto omit_handler_call;
- }
- q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
- QDIO_STATUS_LOOK_FOR_ERROR,
- 0,0,0,-1,-1,q->int_parm);
-omit_handler_call:
- qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
-
-}
-
-static void
-qdio_call_shutdown(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- put_device(&cdev->dev);
-}
-
-static void
-qdio_timeout_handler(struct ccw_device *cdev)
-{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- QDIO_DBF_TEXT2(0, trace, "qtoh");
- sprintf(dbf_text, "%s", cdev->dev.bus_id);
- QDIO_DBF_TEXT2(0, trace, dbf_text);
-
- irq_ptr = cdev->private->qdio_data;
- sprintf(dbf_text, "state:%d", irq_ptr->state);
- QDIO_DBF_TEXT2(0, trace, dbf_text);
-
- switch (irq_ptr->state) {
- case QDIO_IRQ_STATE_INACTIVE:
- QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1,setup,"eq:timeo");
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- break;
- case QDIO_IRQ_STATE_CLEANUP:
- QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
- "irq=0.%x.%x.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- break;
- case QDIO_IRQ_STATE_ESTABLISHED:
- case QDIO_IRQ_STATE_ACTIVE:
- /* I/O has been terminated by common I/O layer. */
- QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1, trace, "cio:term");
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- if (get_device(&cdev->dev)) {
- /* Can't call shutdown from interrupt context. */
- PREPARE_WORK(&cdev->private->kick_work,
- qdio_call_shutdown);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
- break;
- default:
- BUG();
- }
- wake_up(&cdev->private->wait_q);
-}
-
-static void
-qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
-{
- struct qdio_irq *irq_ptr;
- int cstat,dstat;
- char dbf_text[15];
-
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0, trace, "qint");
- sprintf(dbf_text, "%s", cdev->dev.bus_id);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (!intparm) {
- QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
- "handler, device %s\n", cdev->dev.bus_id);
- return;
- }
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr) {
- QDIO_DBF_TEXT2(1, trace, "uint");
- sprintf(dbf_text,"%s", cdev->dev.bus_id);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
- cdev->dev.bus_id);
- return;
- }
-
- if (IS_ERR(irb)) {
- /* Currently running i/o is in error. */
- switch (PTR_ERR(irb)) {
- case -EIO:
- QDIO_PRINT_ERR("i/o error on device %s\n",
- cdev->dev.bus_id);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- case -ETIMEDOUT:
- qdio_timeout_handler(cdev);
- return;
- default:
- QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
- PTR_ERR(irb), cdev->dev.bus_id);
- return;
- }
- }
-
- qdio_irq_check_sense(irq_ptr->schid, irb);
-
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text, "state:%d", irq_ptr->state);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
-
- switch (irq_ptr->state) {
- case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(cdev, cstat, dstat);
- break;
-
- case QDIO_IRQ_STATE_CLEANUP:
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
- break;
-
- case QDIO_IRQ_STATE_ESTABLISHED:
- case QDIO_IRQ_STATE_ACTIVE:
- if (cstat & SCHN_STAT_PCI) {
- qdio_handle_pci(irq_ptr);
- break;
- }
-
- if ((cstat&~SCHN_STAT_PCI)||dstat) {
- qdio_handle_activate_check(cdev, intparm, cstat, dstat);
- break;
- }
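-		/* fall through: no PCI and no check condition, report the unexpected interrupt */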
- default:
- QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
- "device %s?!\n",
- irq_ptr->state, cdev->dev.bus_id);
- }
- wake_up(&cdev->private->wait_q);
-
-}
-
-int
-qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
- unsigned int queue_number)
-{
- int cc = 0;
- struct qdio_q *q;
- struct qdio_irq *irq_ptr;
- void *ptr;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15]="SyncXXXX";
-#endif
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
-#ifdef CONFIG_QDIO_DEBUG
- *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
- QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
- *((int*)(&dbf_text[0]))=flags;
- *((int*)(&dbf_text[4]))=queue_number;
- QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (flags&QDIO_FLAG_SYNC_INPUT) {
- q=irq_ptr->input_qs[queue_number];
- if (!q)
- return -EINVAL;
- if (!(irq_ptr->is_qebsm))
- cc = do_siga_sync(q->schid, 0, q->mask);
- } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
- q=irq_ptr->output_qs[queue_number];
- if (!q)
- return -EINVAL;
- if (!(irq_ptr->is_qebsm))
- cc = do_siga_sync(q->schid, q->mask, 0);
- } else
- return -EINVAL;
-
- ptr=&cc;
- if (cc)
- QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
-
- return cc;
-}
-
-static int
-qdio_get_ssqd_information(struct subchannel_id *schid,
- struct qdio_chsc_ssqd **ssqd_area)
-{
- int result;
-
- QDIO_DBF_TEXT0(0, setup, "getssqd");
- *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
-	if (!*ssqd_area) {
- QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
- schid->sch_no);
- return -ENOMEM;
- }
-
- (*ssqd_area)->request = (struct chsc_header) {
- .length = 0x0010,
- .code = 0x0024,
- };
- (*ssqd_area)->first_sch = schid->sch_no;
- (*ssqd_area)->last_sch = schid->sch_no;
- (*ssqd_area)->ssid = schid->ssid;
- result = chsc(*ssqd_area);
-
- if (result) {
- QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
- result, schid->ssid, schid->sch_no);
- goto out;
- }
-
- if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
- QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
- (*ssqd_area)->response.code,
- schid->ssid, schid->sch_no);
- goto out;
- }
- if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
- !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
- ((*ssqd_area)->sch != schid->sch_no)) {
- QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
- "using all SIGAs.\n",
- schid->ssid, schid->sch_no);
- goto out;
- }
- return 0;
-out:
- return -EINVAL;
-}
-
-int
-qdio_get_ssqd_pct(struct ccw_device *cdev)
-{
- struct qdio_chsc_ssqd *ssqd_area;
- struct subchannel_id schid;
- char dbf_text[15];
- int rc;
- int pct = 0;
-
- QDIO_DBF_TEXT0(0, setup, "getpct");
- schid = ccw_device_get_subchannel_id(cdev);
- rc = qdio_get_ssqd_information(&schid, &ssqd_area);
- if (!rc)
- pct = (int)ssqd_area->pct;
- if (rc != -ENOMEM)
- mempool_free(ssqd_area, qdio_mempool_scssc);
- sprintf(dbf_text, "pct: %d", pct);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
- return pct;
-}
-EXPORT_SYMBOL(qdio_get_ssqd_pct);
-
-static void
-qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
-{
- struct qdio_q *q;
- int i;
- unsigned int count, start_buf;
- char dbf_text[15];
-
-	/* check if QEBSM is disabled */
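-	/* bit 0x01 of qdioac: QEBSM enabled for this subchannel */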
- if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
- irq_ptr->is_qebsm = 0;
- irq_ptr->sch_token = 0;
- irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
- QDIO_DBF_TEXT0(0,setup,"noV=V");
- return;
- }
- irq_ptr->sch_token = token;
- /*input queue*/
- for (i = 0; i < irq_ptr->no_input_qs;i++) {
- q = irq_ptr->input_qs[i];
- count = QDIO_MAX_BUFFERS_PER_Q;
- start_buf = 0;
- set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
- }
- sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- /*output queue*/
- for (i = 0; i < irq_ptr->no_output_qs; i++) {
- q = irq_ptr->output_qs[i];
- count = QDIO_MAX_BUFFERS_PER_Q;
- start_buf = 0;
- set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
- }
-}
-
-static void
-qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
-{
- int rc;
- struct qdio_chsc_ssqd *ssqd_area;
-
- QDIO_DBF_TEXT0(0,setup,"getssqd");
- irq_ptr->qdioac = 0;
- rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
- if (rc) {
-		QDIO_PRINT_WARN("using all SIGAs for sch x%x.\n",
- irq_ptr->schid.sch_no);
- irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
- CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
- irq_ptr->is_qebsm = 0;
- } else
- irq_ptr->qdioac = ssqd_area->qdioac1;
-
-	qdio_check_subchannel_qebsm(irq_ptr, rc ? 0 : ssqd_area->sch_token);
- if (rc != -ENOMEM)
- mempool_free(ssqd_area, qdio_mempool_scssc);
-}
-
-static unsigned int
-tiqdio_check_chsc_availability(void)
-{
- char dbf_text[15];
-
- /* Check for bit 41. */
- if (!css_general_characteristics.aif) {
- QDIO_PRINT_WARN("Adapter interruption facility not " \
- "installed.\n");
- return -ENOENT;
- }
-
- /* Check for bits 107 and 108. */
- if (!css_chsc_characteristics.scssc ||
- !css_chsc_characteristics.scsscf) {
- QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
- "not available.\n");
- return -ENOENT;
- }
-
- /* Check for OSA/FCP thin interrupts (bit 67). */
- hydra_thinints = css_general_characteristics.aif_osa;
- sprintf(dbf_text,"hydrati%1x", hydra_thinints);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
-
-#ifdef CONFIG_64BIT
- /* Check for QEBSM support in general (bit 58). */
- is_passthrough = css_general_characteristics.qebsm;
-#endif
- sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
-
- /* Check for aif time delay disablement fac (bit 56). If installed,
- * omit svs even under lpar (good point by rick again) */
- omit_svs = css_general_characteristics.aif_tdd;
- sprintf(dbf_text,"omitsvs%1x", omit_svs);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- return 0;
-}
-
-
-static unsigned int
-tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
-{
- unsigned long real_addr_local_summary_bit;
- unsigned long real_addr_dev_st_chg_ind;
- void *ptr;
- char dbf_text[15];
-
- unsigned int resp_code;
- int result;
-
- struct {
- struct chsc_header request;
- u16 operation_code;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
- u32 ks:4;
- u32 kc:4;
- u32 reserved4:21;
- u32 isc:3;
- u32 word_with_d_bit;
- /* set to 0x10000000 to enable
- * time delay disablement facility */
- u32 reserved5;
- struct subchannel_id schid;
- u32 reserved6[1004];
- struct chsc_header response;
- u32 reserved7;
- } *scssc_area;
-
- if (!irq_ptr->is_thinint_irq)
- return -ENODEV;
-
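-	/* reset_to_zero clears the indicator addresses again (used at shutdown) */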
- if (reset_to_zero) {
- real_addr_local_summary_bit=0;
- real_addr_dev_st_chg_ind=0;
- } else {
- real_addr_local_summary_bit=
- virt_to_phys((volatile void *)tiqdio_ind);
- real_addr_dev_st_chg_ind=
- virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
- }
-
- scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
- if (!scssc_area) {
- QDIO_PRINT_WARN("No memory for setting indicators on " \
- "subchannel 0.%x.%x.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- return -ENOMEM;
- }
- scssc_area->request = (struct chsc_header) {
- .length = 0x0fe0,
- .code = 0x0021,
- };
- scssc_area->operation_code = 0;
-
- scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
- scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
- scssc_area->ks = QDIO_STORAGE_KEY;
- scssc_area->kc = QDIO_STORAGE_KEY;
- scssc_area->isc = TIQDIO_THININT_ISC;
- scssc_area->schid = irq_ptr->schid;
- /* enables the time delay disablement facility. Don't care
- * whether it is really there (i.e. we haven't checked for
- * it) */
- if (css_general_characteristics.aif_tdd)
- scssc_area->word_with_d_bit = 0x10000000;
- else
- QDIO_PRINT_WARN("Time delay disablement facility " \
- "not available\n");
-
- result = chsc(scssc_area);
- if (result) {
- QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
- "cc=%i.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
- result = -EIO;
- goto out;
- }
-
- resp_code = scssc_area->response.code;
- if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
- QDIO_PRINT_WARN("response upon setting indicators " \
- "is 0x%x.\n",resp_code);
- sprintf(dbf_text,"sidR%4x",resp_code);
- QDIO_DBF_TEXT1(0,trace,dbf_text);
- QDIO_DBF_TEXT1(0,setup,dbf_text);
- ptr=&scssc_area->response;
- QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
- result = -EIO;
- goto out;
- }
-
- QDIO_DBF_TEXT2(0,setup,"setscind");
- QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
- sizeof(unsigned long));
- QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
- result = 0;
-out:
- mempool_free(scssc_area, qdio_mempool_scssc);
- return result;
-
-}
-
-static unsigned int
-tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
-{
- unsigned int resp_code;
- int result;
- void *ptr;
- char dbf_text[15];
-
- struct {
- struct chsc_header request;
- u16 operation_code;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
- u32 reserved4[2];
- u32 delay_target;
- u32 reserved5[1009];
- struct chsc_header response;
- u32 reserved6;
- } *scsscf_area;
-
- if (!irq_ptr->is_thinint_irq)
- return -ENODEV;
-
- scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
- if (!scsscf_area) {
- QDIO_PRINT_WARN("No memory for setting delay target on " \
- "subchannel 0.%x.%x.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- return -ENOMEM;
- }
- scsscf_area->request = (struct chsc_header) {
- .length = 0x0fe0,
- .code = 0x1027,
- };
-
- scsscf_area->delay_target = delay_target<<16;
-
- result=chsc(scsscf_area);
- if (result) {
- QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
- "cc=%i. Continuing.\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
- result);
- result = -EIO;
- goto out;
- }
-
- resp_code = scsscf_area->response.code;
- if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
- QDIO_PRINT_WARN("response upon setting delay target " \
- "is 0x%x. Continuing.\n",resp_code);
- sprintf(dbf_text,"sdtR%4x",resp_code);
- QDIO_DBF_TEXT1(0,trace,dbf_text);
- QDIO_DBF_TEXT1(0,setup,dbf_text);
- ptr=&scsscf_area->response;
- QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
- }
- QDIO_DBF_TEXT2(0,trace,"delytrgt");
- QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
- result = 0; /* not critical */
-out:
- mempool_free(scsscf_area, qdio_mempool_scssc);
- return result;
-}
-
-int
-qdio_cleanup(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
- int rc;
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
- sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0,trace,dbf_text);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
-
- rc = qdio_shutdown(cdev, how);
- if ((rc == 0) || (rc == -EINPROGRESS))
- rc = qdio_free(cdev);
- return rc;
-}
-
-int
-qdio_shutdown(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr;
- int i;
- int result = 0;
- int rc;
- unsigned long flags;
- int timeout;
- char dbf_text[15];
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
- down(&irq_ptr->setting_up_sema);
-
- sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0,trace,dbf_text);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
-
- /* mark all qs as uninteresting */
- for (i=0;i<irq_ptr->no_input_qs;i++)
- atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
-
- for (i=0;i<irq_ptr->no_output_qs;i++)
- atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
-
- tasklet_kill(&tiqdio_tasklet);
-
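-	/* kill the queue tasklets and wait for their use_count to drop to zero */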
- for (i=0;i<irq_ptr->no_input_qs;i++) {
- qdio_unmark_q(irq_ptr->input_qs[i]);
- tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
- wait_event_interruptible_timeout(cdev->private->wait_q,
- !atomic_read(&irq_ptr->
- input_qs[i]->
- use_count),
- QDIO_NO_USE_COUNT_TIMEOUT);
- if (atomic_read(&irq_ptr->input_qs[i]->use_count))
- result=-EINPROGRESS;
- }
-
- for (i=0;i<irq_ptr->no_output_qs;i++) {
- tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
- del_timer(&irq_ptr->output_qs[i]->timer);
- wait_event_interruptible_timeout(cdev->private->wait_q,
- !atomic_read(&irq_ptr->
- output_qs[i]->
- use_count),
- QDIO_NO_USE_COUNT_TIMEOUT);
- if (atomic_read(&irq_ptr->output_qs[i]->use_count))
- result=-EINPROGRESS;
- }
-
- /* cleanup subchannel */
- spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
- if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
- timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
- } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
- timeout=QDIO_CLEANUP_HALT_TIMEOUT;
- } else { /* default behaviour */
- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
- timeout=QDIO_CLEANUP_HALT_TIMEOUT;
- }
- if (rc == -ENODEV) {
-		/* No need to wait; the device is no longer present. */
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
- } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
-		/*
-		 * Whoever put another handler there has to cope with the
-		 * interrupt themselves. This might happen if qdio_shutdown
-		 * was called on queues that were already shut down, but it
-		 * shouldn't have bad side effects.
-		 */
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
- } else if (rc == 0) {
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
-
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR,
- timeout);
- } else {
- QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
-				"device %s\n", rc, cdev->dev.bus_id);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
- result = rc;
- goto out;
- }
- if (irq_ptr->is_thinint_irq) {
- qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
- tiqdio_set_subchannel_ind(irq_ptr,1);
- /* reset adapter interrupt indicators */
- }
-
- /* exchange int handlers, if necessary */
- if ((void*)cdev->handler == (void*)qdio_handler)
- cdev->handler=irq_ptr->original_int_handler;
-
- /* Ignore errors. */
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
-out:
- up(&irq_ptr->setting_up_sema);
- return result;
-}
-
-int
-qdio_free(struct ccw_device *cdev)
-{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
- down(&irq_ptr->setting_up_sema);
-
- sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0,trace,dbf_text);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
-
- cdev->private->qdio_data = NULL;
-
- up(&irq_ptr->setting_up_sema);
-
- qdio_release_irq_memory(irq_ptr);
- module_put(THIS_MODULE);
- return 0;
-}
-
-static void
-qdio_allocate_do_dbf(struct qdio_initialize *init_data)
-{
-	char dbf_text[20]; /* sized generously in case a sprintf emits more than 8 chars */
-
- sprintf(dbf_text,"qfmt:%x",init_data->q_format);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
- sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
- QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
- QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
- sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
- QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
- QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
- QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
- QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
- QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
-}
-
-static void
-qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
-{
- irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
- irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
-
- irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
-
- irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
-
- irq_ptr->qdr->qdf0[i].slsba=
- (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
-
- irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
-}
-
-static void
-qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
- int j, int iqfmt)
-{
- irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
- irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
-
- irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
-
- irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
-
- irq_ptr->qdr->qdf0[i+j].slsba=
- (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
-
- irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
- irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
-}
-
-
-static void
-qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
-{
- int i;
-
- for (i=0;i<irq_ptr->no_input_qs;i++) {
- irq_ptr->input_qs[i]->siga_sync=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
- irq_ptr->input_qs[i]->siga_in=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
- irq_ptr->input_qs[i]->siga_out=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
- irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
- irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
- irq_ptr->hydra_gives_outbound_pcis;
- irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
- ((irq_ptr->qdioac&
- (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
- CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
- (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
- CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
-
- }
-}
-
-static void
-qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
-{
- int i;
-
- for (i=0;i<irq_ptr->no_output_qs;i++) {
- irq_ptr->output_qs[i]->siga_sync=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
- irq_ptr->output_qs[i]->siga_in=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
- irq_ptr->output_qs[i]->siga_out=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
- irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
- irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
- irq_ptr->hydra_gives_outbound_pcis;
- irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
- ((irq_ptr->qdioac&
- (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
- CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
- (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
- CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
-
- }
-}
-
-static int
-qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
- int dstat)
-{
- char dbf_text[15];
- struct qdio_irq *irq_ptr;
-
- irq_ptr = cdev->private->qdio_data;
-
- if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
- sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1,trace,dbf_text);
- QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
- QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
- QDIO_PRINT_ERR("received check condition on establish " \
- "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
- cstat,dstat);
- qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
- }
-
- if (!(dstat & DEV_STAT_DEV_END)) {
- QDIO_DBF_TEXT2(1,setup,"eq:no de");
- QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
- QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
- "device end: dstat=%02x, cstat=%02x\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
- dstat, cstat);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- return 1;
- }
-
- if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
- QDIO_DBF_TEXT2(1,setup,"eq:badio");
- QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
- QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
- "the following devstat: dstat=%02x, "
- "cstat=%02x\n", irq_ptr->schid.ssid,
- irq_ptr->schid.sch_no, dstat, cstat);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- return 1;
- }
- return 0;
-}
-
-static void
-qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
-{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- irq_ptr = cdev->private->qdio_data;
-
- sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_TEXT0(0,trace,dbf_text);
-
- if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
- return;
-
- qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
-}
-
-int
-qdio_initialize(struct qdio_initialize *init_data)
-{
- int rc;
- char dbf_text[15];
-
- sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_TEXT0(0,trace,dbf_text);
-
- rc = qdio_allocate(init_data);
- if (rc == 0) {
- rc = qdio_establish(init_data);
- if (rc != 0)
- qdio_free(init_data->cdev);
- }
-
- return rc;
-}
-
-
-int
-qdio_allocate(struct qdio_initialize *init_data)
-{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_TEXT0(0,trace,dbf_text);
- if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
- (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
- ((init_data->no_input_qs) && (!init_data->input_handler)) ||
- ((init_data->no_output_qs) && (!init_data->output_handler)) )
- return -EINVAL;
-
- if (!init_data->input_sbal_addr_array)
- return -EINVAL;
-
- if (!init_data->output_sbal_addr_array)
- return -EINVAL;
-
- qdio_allocate_do_dbf(init_data);
-
- /* create irq */
- irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-
- QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
- QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
-
- if (!irq_ptr) {
- QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
- return -ENOMEM;
- }
-
- init_MUTEX(&irq_ptr->setting_up_sema);
-
- /* QDR must be in DMA area since CCW data address is only 32 bit */
- irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (!(irq_ptr->qdr)) {
- free_page((unsigned long) irq_ptr);
- QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
- return -ENOMEM;
- }
- QDIO_DBF_TEXT0(0,setup,"qdr:");
- QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
-
- if (qdio_alloc_qs(irq_ptr,
- init_data->no_input_qs,
- init_data->no_output_qs)) {
- QDIO_PRINT_ERR("queue allocation failed!\n");
- qdio_release_irq_memory(irq_ptr);
- return -ENOMEM;
- }
-
- init_data->cdev->private->qdio_data = irq_ptr;
-
- qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
-
- return 0;
-}
-
-static int qdio_fill_irq(struct qdio_initialize *init_data)
-{
- int i;
- char dbf_text[15];
- struct ciw *ciw;
- int is_iqdio;
- struct qdio_irq *irq_ptr;
-
- irq_ptr = init_data->cdev->private->qdio_data;
-
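-	/*
-	 * wipe only the part in front of the qdr pointer, everything
-	 * allocated by qdio_allocate stays intact
-	 */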
- memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
-
- /* wipes qib.ac, required by ar7063 */
- memset(irq_ptr->qdr,0,sizeof(struct qdr));
-
- irq_ptr->int_parm=init_data->int_parm;
-
- irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
- irq_ptr->no_input_qs=init_data->no_input_qs;
- irq_ptr->no_output_qs=init_data->no_output_qs;
-
- if (init_data->q_format==QDIO_IQDIO_QFMT) {
- irq_ptr->is_iqdio_irq=1;
- irq_ptr->is_thinint_irq=1;
- } else {
- irq_ptr->is_iqdio_irq=0;
- irq_ptr->is_thinint_irq=hydra_thinints;
- }
- sprintf(dbf_text,"is_i_t%1x%1x",
- irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
- QDIO_DBF_TEXT2(0,setup,dbf_text);
-
- if (irq_ptr->is_thinint_irq) {
- irq_ptr->dev_st_chg_ind = qdio_get_indicator();
- QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
- if (!irq_ptr->dev_st_chg_ind) {
- QDIO_PRINT_WARN("no indicator location available " \
- "for irq 0.%x.%x\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- qdio_release_irq_memory(irq_ptr);
- return -ENOBUFS;
- }
- }
-
- /* defaults */
- irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
- irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
- irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
- irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
-
- qdio_fill_qs(irq_ptr, init_data->cdev,
- init_data->no_input_qs,
- init_data->no_output_qs,
- init_data->input_handler,
- init_data->output_handler,init_data->int_parm,
- init_data->q_format,init_data->flags,
- init_data->input_sbal_addr_array,
- init_data->output_sbal_addr_array);
-
- if (!try_module_get(THIS_MODULE)) {
- QDIO_PRINT_CRIT("try_module_get() failed!\n");
- qdio_release_irq_memory(irq_ptr);
- return -EINVAL;
- }
-
- qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
- init_data->no_output_qs,
- init_data->min_input_threshold,
- init_data->max_input_threshold,
- init_data->min_output_threshold,
- init_data->max_output_threshold);
-
- /* fill in qdr */
- irq_ptr->qdr->qfmt=init_data->q_format;
- irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
- irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
- irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
- irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
-
- irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
- irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
-
- /* fill in qib */
- irq_ptr->is_qebsm = is_passthrough;
- if (irq_ptr->is_qebsm)
- irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
-
- irq_ptr->qib.qfmt=init_data->q_format;
- if (init_data->no_input_qs)
- irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
- if (init_data->no_output_qs)
- irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
- memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
-
- qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
- init_data->qib_param_field,
- init_data->no_input_qs,
- init_data->no_output_qs,
- init_data->input_slib_elements,
- init_data->output_slib_elements);
-
- /* first input descriptors, then output descriptors */
- is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
- for (i=0;i<init_data->no_input_qs;i++)
- qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
-
- for (i=0;i<init_data->no_output_qs;i++)
- qdio_allocate_fill_output_desc(irq_ptr, i,
- init_data->no_input_qs,
- is_iqdio);
-
- /* qdr, qib, sls, slsbs, slibs, sbales filled. */
-
- /* get qdio commands */
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
- if (!ciw) {
- QDIO_DBF_TEXT2(1,setup,"no eq");
- QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
- "Trying to use default.\n");
- } else
- irq_ptr->equeue = *ciw;
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
- if (!ciw) {
- QDIO_DBF_TEXT2(1,setup,"no aq");
- QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
- "Trying to use default.\n");
- } else
- irq_ptr->aqueue = *ciw;
-
- /* Set new interrupt handler. */
- irq_ptr->original_int_handler = init_data->cdev->handler;
- init_data->cdev->handler = qdio_handler;
-
- return 0;
-}
-
-int
-qdio_establish(struct qdio_initialize *init_data)
-{
- struct qdio_irq *irq_ptr;
- unsigned long saveflags;
- int result, result2;
- struct ccw_device *cdev;
- char dbf_text[20];
-
- cdev=init_data->cdev;
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -EINVAL;
-
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
- down(&irq_ptr->setting_up_sema);
-
- qdio_fill_irq(init_data);
-
- /* the thinint CHSC stuff */
- if (irq_ptr->is_thinint_irq) {
-
- result = tiqdio_set_subchannel_ind(irq_ptr,0);
- if (result) {
- up(&irq_ptr->setting_up_sema);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return result;
- }
- tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
- }
-
- sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_DBF_TEXT0(0,trace,dbf_text);
-
- /* establish q */
- irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
- irq_ptr->ccw.flags=CCW_FLAG_SLI;
- irq_ptr->ccw.count=irq_ptr->equeue.count;
- irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
-
- spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
-
- ccw_device_set_options_mask(cdev, 0);
- result = ccw_device_start(cdev, &irq_ptr->ccw,
- QDIO_DOING_ESTABLISH, 0, 0);
- if (result) {
- result2 = ccw_device_start(cdev, &irq_ptr->ccw,
- QDIO_DOING_ESTABLISH, 0, 0);
- sprintf(dbf_text,"eq:io%4x",result);
- QDIO_DBF_TEXT2(1,setup,dbf_text);
- if (result2) {
-			sprintf(dbf_text,"eq:io%4x",result2);
- QDIO_DBF_TEXT2(1,setup,dbf_text);
- }
- QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
- result, result2);
- result=result2;
- }
-
- spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
-
- if (result) {
- up(&irq_ptr->setting_up_sema);
- qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
- return result;
- }
-
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR,
- QDIO_ESTABLISH_TIMEOUT);
-
- if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
- result = 0;
- else {
- up(&irq_ptr->setting_up_sema);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return -EIO;
- }
-
- qdio_get_ssqd_siga(irq_ptr);
- /* if this gets set once, we're running under VM and can omit SVSes */
- if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
- omit_svs=1;
-
- sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
- QDIO_DBF_TEXT2(0,setup,dbf_text);
-
- sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
- QDIO_DBF_TEXT2(0,setup,dbf_text);
-
- irq_ptr->hydra_gives_outbound_pcis=
- irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
- irq_ptr->sync_done_on_outb_pcis=
- irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
-
- qdio_initialize_set_siga_flags_input(irq_ptr);
- qdio_initialize_set_siga_flags_output(irq_ptr);
-
- up(&irq_ptr->setting_up_sema);
-
- return result;
-
-}
-
-int
-qdio_activate(struct ccw_device *cdev, int flags)
-{
- struct qdio_irq *irq_ptr;
- int i,result=0,result2;
- unsigned long saveflags;
- char dbf_text[20]; /* see qdio_initialize */
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
- down(&irq_ptr->setting_up_sema);
- if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
- result=-EBUSY;
- goto out;
- }
-
- sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(0,setup,dbf_text);
- QDIO_DBF_TEXT2(0,trace,dbf_text);
-
- /* activate q */
- irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
- irq_ptr->ccw.flags=CCW_FLAG_SLI;
- irq_ptr->ccw.count=irq_ptr->aqueue.count;
- irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
-
- spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
-
- ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
- result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
- 0, DOIO_DENY_PREFETCH);
- if (result) {
- result2=ccw_device_start(cdev,&irq_ptr->ccw,
- QDIO_DOING_ACTIVATE,0,0);
- sprintf(dbf_text,"aq:io%4x",result);
- QDIO_DBF_TEXT2(1,setup,dbf_text);
- if (result2) {
-			sprintf(dbf_text,"aq:io%4x",result2);
- QDIO_DBF_TEXT2(1,setup,dbf_text);
- }
- QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
- result, result2);
- result=result2;
- }
-
- spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
- if (result)
- goto out;
-
- for (i=0;i<irq_ptr->no_input_qs;i++) {
- if (irq_ptr->is_thinint_irq) {
-			/*
-			 * this way we know that, if tiqdio_inbound_processing
-			 * interrupts us, qdio_unmark_q will not be called
-			 */
- qdio_reserve_q(irq_ptr->input_qs[i]);
- qdio_mark_tiq(irq_ptr->input_qs[i]);
- qdio_release_q(irq_ptr->input_qs[i]);
- }
- }
-
- if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
- for (i=0;i<irq_ptr->no_input_qs;i++) {
- irq_ptr->input_qs[i]->is_input_q|=
- QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
- }
- }
-
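-	/* give activation a moment to complete; a failure shows up as STOPPED or ERR below */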
- msleep(QDIO_ACTIVATE_TIMEOUT);
- switch (irq_ptr->state) {
- case QDIO_IRQ_STATE_STOPPED:
- case QDIO_IRQ_STATE_ERR:
- up(&irq_ptr->setting_up_sema);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- down(&irq_ptr->setting_up_sema);
- result = -EIO;
- break;
- default:
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
- result = 0;
- }
- out:
- up(&irq_ptr->setting_up_sema);
-
- return result;
-}
-
-/* buffers filled forwards again to make Rick happy */
-static void
-qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
- unsigned int count, struct qdio_buffer *buffers)
-{
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
- int tmp = 0;
-
- qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
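-	/*
-	 * with QEBSM set_slsb may mark several buffers per call,
-	 * otherwise we advance one buffer at a time
-	 */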
- if (irq->is_qebsm) {
- while (count) {
- tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
- if (!tmp)
- return;
- }
- return;
- }
- for (;;) {
- set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
- count--;
- if (!count) break;
- qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
- }
-}
-
-static void
-qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
- unsigned int count, struct qdio_buffer *buffers)
-{
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
- int tmp = 0;
-
- qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
- if (irq->is_qebsm) {
- while (count) {
- tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
- if (!tmp)
- return;
- }
- return;
- }
-
- for (;;) {
- set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
- count--;
- if (!count) break;
- qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
- }
-}
-
-static void
-do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
- unsigned int qidx, unsigned int count,
- struct qdio_buffer *buffers)
-{
- int used_elements;
-
- /* This is the inbound handling of queues */
- used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
-
- qdio_do_qdio_fill_input(q,qidx,count,buffers);
-
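-	/* all buffers have been returned while under interrupt: switch off polling */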
- if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
- (callflags&QDIO_FLAG_UNDER_INTERRUPT))
- atomic_xchg(&q->polling,0);
-
- if (used_elements)
- return;
- if (callflags&QDIO_FLAG_DONT_SIGA)
- return;
- if (q->siga_in) {
- int result;
-
- result=qdio_siga_input(q);
- if (result) {
- if (q->siga_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
- q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
- q->siga_error=result;
- }
- }
-
- qdio_mark_q(q);
-}
-
-static void
-do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
- unsigned int qidx, unsigned int count,
- struct qdio_buffer *buffers)
-{
- int used_elements;
- unsigned int cnt, start_buf;
- unsigned char state = 0;
- struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
-
- /* This is the outbound handling of queues */
- qdio_do_qdio_fill_output(q,qidx,count,buffers);
-
- used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
-
- if (callflags&QDIO_FLAG_DONT_SIGA) {
- qdio_perf_stat_inc(&perf_stats.outbound_cnt);
- return;
- }
- if (callflags & QDIO_FLAG_PCI_OUT)
- q->is_pci_out = 1;
- else
- q->is_pci_out = 0;
- if (q->is_iqdio_q) {
- /* one siga for every sbal */
- while (count--)
- qdio_kick_outbound_q(q);
-
- __qdio_outbound_processing(q);
- } else {
- /* under VM, we do a SIGA sync unconditionally */
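-		/*
-		 * SYNC_MEMORY expands to an if statement, so the else
-		 * below binds to that hidden if
-		 */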
- SYNC_MEMORY;
- else {
- /*
- * w/o shadow queues (else branch of
- * SYNC_MEMORY :-/ ), we try to
- * fast-requeue buffers
- */
- if (irq->is_qebsm) {
- cnt = 1;
- start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
- (QDIO_MAX_BUFFERS_PER_Q-1));
- qdio_do_eqbs(q, &state, &start_buf, &cnt);
- } else
- state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
- &(QDIO_MAX_BUFFERS_PER_Q-1) ];
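-			/* previous buffer still primed: skip the SIGA and count a fast requeue */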
- if (state != SLSB_CU_OUTPUT_PRIMED) {
- qdio_kick_outbound_q(q);
- } else {
- QDIO_DBF_TEXT3(0,trace, "fast-req");
- qdio_perf_stat_inc(&perf_stats.fast_reqs);
- }
- }
- /*
- * only marking the q could take too long,
- * the upper layer module could do a lot of
- * traffic in that time
- */
- __qdio_outbound_processing(q);
- }
-
- qdio_perf_stat_inc(&perf_stats.outbound_cnt);
-}
-
-/* count must be 1 in iqdio */
-int
-do_QDIO(struct ccw_device *cdev,unsigned int callflags,
- unsigned int queue_number, unsigned int qidx,
- unsigned int count,struct qdio_buffer *buffers)
-{
- struct qdio_irq *irq_ptr;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[20];
-
- sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
-	if ( (qidx>=QDIO_MAX_BUFFERS_PER_Q) ||
-	     (count>QDIO_MAX_BUFFERS_PER_Q) ||
-	     (queue_number>=QDIO_MAX_QUEUES_PER_IRQ) )
- return -EINVAL;
-
- if (count==0)
- return 0;
-
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return -ENODEV;
-
-#ifdef CONFIG_QDIO_DEBUG
- if (callflags&QDIO_FLAG_SYNC_INPUT)
- QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
- sizeof(void*));
- else
- QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
- sizeof(void*));
- sprintf(dbf_text,"flag%04x",callflags);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
- sprintf(dbf_text,"qi%02xct%02x",qidx,count);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
-
- if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
- return -EBUSY;
-
- if (callflags&QDIO_FLAG_SYNC_INPUT)
- do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
- callflags, qidx, count, buffers);
- else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
- do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
- callflags, qidx, count, buffers);
- else {
- QDIO_DBF_TEXT3(1,trace,"doQD:inv");
- return -EINVAL;
- }
- return 0;
-}
-
-static int
-qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
- int buffer_length, int *eof, void *data)
-{
- int c=0;
-
-	/* we are always called with buffer_length=4k, so we deliver
-	   everything on the first read */
- if (offset>0)
- return 0;
-
-#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
-#ifdef CONFIG_64BIT
- _OUTP_IT("Number of tasklet runs (total) : %li\n",
- (long)atomic64_read(&perf_stats.tl_runs));
- _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n",
- (long)atomic64_read(&perf_stats.inbound_tl_runs),
- (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
- _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n",
- (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
- (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
- _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n",
- (long)atomic64_read(&perf_stats.outbound_tl_runs),
- (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
- _OUTP_IT("\n");
- _OUTP_IT("Number of SIGA sync's issued : %li\n",
- (long)atomic64_read(&perf_stats.siga_syncs));
- _OUTP_IT("Number of SIGA in's issued : %li\n",
- (long)atomic64_read(&perf_stats.siga_ins));
- _OUTP_IT("Number of SIGA out's issued : %li\n",
- (long)atomic64_read(&perf_stats.siga_outs));
- _OUTP_IT("Number of PCIs caught : %li\n",
- (long)atomic64_read(&perf_stats.pcis));
- _OUTP_IT("Number of adapter interrupts caught : %li\n",
- (long)atomic64_read(&perf_stats.thinints));
- _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n",
- (long)atomic64_read(&perf_stats.fast_reqs));
- _OUTP_IT("\n");
- _OUTP_IT("Number of inbound transfers : %li\n",
- (long)atomic64_read(&perf_stats.inbound_cnt));
- _OUTP_IT("Number of do_QDIOs outbound : %li\n",
- (long)atomic64_read(&perf_stats.outbound_cnt));
-#else /* CONFIG_64BIT */
- _OUTP_IT("Number of tasklet runs (total) : %i\n",
- atomic_read(&perf_stats.tl_runs));
- _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n",
- atomic_read(&perf_stats.inbound_tl_runs),
- atomic_read(&perf_stats.inbound_tl_runs_resched));
- _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n",
- atomic_read(&perf_stats.inbound_thin_tl_runs),
- atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
- _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n",
- atomic_read(&perf_stats.outbound_tl_runs),
- atomic_read(&perf_stats.outbound_tl_runs_resched));
- _OUTP_IT("\n");
- _OUTP_IT("Number of SIGA sync's issued : %i\n",
- atomic_read(&perf_stats.siga_syncs));
- _OUTP_IT("Number of SIGA in's issued : %i\n",
- atomic_read(&perf_stats.siga_ins));
- _OUTP_IT("Number of SIGA out's issued : %i\n",
- atomic_read(&perf_stats.siga_outs));
- _OUTP_IT("Number of PCIs caught : %i\n",
- atomic_read(&perf_stats.pcis));
- _OUTP_IT("Number of adapter interrupts caught : %i\n",
- atomic_read(&perf_stats.thinints));
- _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %i\n",
- atomic_read(&perf_stats.fast_reqs));
- _OUTP_IT("\n");
- _OUTP_IT("Number of inbound transfers : %i\n",
- atomic_read(&perf_stats.inbound_cnt));
- _OUTP_IT("Number of do_QDIOs outbound : %i\n",
- atomic_read(&perf_stats.outbound_cnt));
-#endif /* CONFIG_64BIT */
- _OUTP_IT("\n");
-
- return c;
-}
-
-static struct proc_dir_entry *qdio_perf_proc_file;
-
-static void
-qdio_add_procfs_entry(void)
-{
- proc_perf_file_registration=0;
- qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
- S_IFREG|0444,NULL);
- if (qdio_perf_proc_file) {
- qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
- } else proc_perf_file_registration=-1;
-
- if (proc_perf_file_registration)
- QDIO_PRINT_WARN("was not able to register perf. " \
- "proc-file (%i).\n",
- proc_perf_file_registration);
-}
-
-static void
-qdio_remove_procfs_entry(void)
-{
-	if (!proc_perf_file_registration) /* i.e. registration succeeded earlier */
- remove_proc_entry(QDIO_PERF,NULL);
-}
-
-/**
- * attributes in sysfs
- *****************************************************************************/
-
-static ssize_t
-qdio_performance_stats_show(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
-}
-
-static ssize_t
-qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
-{
- unsigned long i;
- int ret;
-
- ret = strict_strtoul(buf, 16, &i);
- if (!ret && ((i == 0) || (i == 1))) {
- if (i == qdio_performance_stats)
- return count;
- qdio_performance_stats = i;
- if (i==0) {
- /* reset perf. stat. info */
-#ifdef CONFIG_64BIT
- atomic64_set(&perf_stats.tl_runs, 0);
- atomic64_set(&perf_stats.outbound_tl_runs, 0);
- atomic64_set(&perf_stats.inbound_tl_runs, 0);
- atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
- atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
- atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
- 0);
- atomic64_set(&perf_stats.siga_outs, 0);
- atomic64_set(&perf_stats.siga_ins, 0);
- atomic64_set(&perf_stats.siga_syncs, 0);
- atomic64_set(&perf_stats.pcis, 0);
- atomic64_set(&perf_stats.thinints, 0);
- atomic64_set(&perf_stats.fast_reqs, 0);
- atomic64_set(&perf_stats.outbound_cnt, 0);
- atomic64_set(&perf_stats.inbound_cnt, 0);
-#else /* CONFIG_64BIT */
- atomic_set(&perf_stats.tl_runs, 0);
- atomic_set(&perf_stats.outbound_tl_runs, 0);
- atomic_set(&perf_stats.inbound_tl_runs, 0);
- atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
- atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
- atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
- atomic_set(&perf_stats.siga_outs, 0);
- atomic_set(&perf_stats.siga_ins, 0);
- atomic_set(&perf_stats.siga_syncs, 0);
- atomic_set(&perf_stats.pcis, 0);
- atomic_set(&perf_stats.thinints, 0);
- atomic_set(&perf_stats.fast_reqs, 0);
- atomic_set(&perf_stats.outbound_cnt, 0);
- atomic_set(&perf_stats.inbound_cnt, 0);
-#endif /* CONFIG_64BIT */
- }
- } else {
- QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
- return -EINVAL;
- }
- return count;
-}
-
-static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
- qdio_performance_stats_store);
-
-static void
-tiqdio_register_thinints(void)
-{
- char dbf_text[20];
-
- tiqdio_ind =
- s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
- TIQDIO_THININT_ISC);
- if (IS_ERR(tiqdio_ind)) {
- sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
- QDIO_DBF_TEXT0(0,setup,dbf_text);
- QDIO_PRINT_ERR("failed to register adapter handler " \
- "(rc=%li).\nAdapter interrupts might " \
- "not work. Continuing.\n",
- PTR_ERR(tiqdio_ind));
- tiqdio_ind = NULL;
- }
-}
-
-static void
-tiqdio_unregister_thinints(void)
-{
- if (tiqdio_ind)
- s390_unregister_adapter_interrupt(tiqdio_ind,
- TIQDIO_THININT_ISC);
-}
-
-static int
-qdio_get_qdio_memory(void)
-{
- int i;
- indicator_used[0]=1;
-
- for (i=1;i<INDICATORS_PER_CACHELINE;i++)
- indicator_used[i]=0;
- indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
- GFP_KERNEL);
- if (!indicators)
- return -ENOMEM;
- return 0;
-}
-
-static void
-qdio_release_qdio_memory(void)
-{
- kfree(indicators);
-}
-
-static void
-qdio_unregister_dbf_views(void)
-{
- if (qdio_dbf_setup)
- debug_unregister(qdio_dbf_setup);
- if (qdio_dbf_sbal)
- debug_unregister(qdio_dbf_sbal);
- if (qdio_dbf_sense)
- debug_unregister(qdio_dbf_sense);
- if (qdio_dbf_trace)
- debug_unregister(qdio_dbf_trace);
-#ifdef CONFIG_QDIO_DEBUG
- if (qdio_dbf_slsb_out)
- debug_unregister(qdio_dbf_slsb_out);
- if (qdio_dbf_slsb_in)
- debug_unregister(qdio_dbf_slsb_in);
-#endif /* CONFIG_QDIO_DEBUG */
-}
-
-static int
-qdio_register_dbf_views(void)
-{
- qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
- QDIO_DBF_SETUP_PAGES,
- QDIO_DBF_SETUP_NR_AREAS,
- QDIO_DBF_SETUP_LEN);
- if (!qdio_dbf_setup)
- goto oom;
- debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
-
- qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
- QDIO_DBF_SBAL_PAGES,
- QDIO_DBF_SBAL_NR_AREAS,
- QDIO_DBF_SBAL_LEN);
- if (!qdio_dbf_sbal)
- goto oom;
-
- debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
-
- qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
- QDIO_DBF_SENSE_PAGES,
- QDIO_DBF_SENSE_NR_AREAS,
- QDIO_DBF_SENSE_LEN);
- if (!qdio_dbf_sense)
- goto oom;
-
- debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
-
- qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
- QDIO_DBF_TRACE_PAGES,
- QDIO_DBF_TRACE_NR_AREAS,
- QDIO_DBF_TRACE_LEN);
- if (!qdio_dbf_trace)
- goto oom;
-
- debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
-
-#ifdef CONFIG_QDIO_DEBUG
- qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
- QDIO_DBF_SLSB_OUT_PAGES,
- QDIO_DBF_SLSB_OUT_NR_AREAS,
- QDIO_DBF_SLSB_OUT_LEN);
- if (!qdio_dbf_slsb_out)
- goto oom;
- debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
-
- qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
- QDIO_DBF_SLSB_IN_PAGES,
- QDIO_DBF_SLSB_IN_NR_AREAS,
- QDIO_DBF_SLSB_IN_LEN);
- if (!qdio_dbf_slsb_in)
- goto oom;
- debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
- debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
-#endif /* CONFIG_QDIO_DEBUG */
- return 0;
-oom:
- QDIO_PRINT_ERR("not enough memory for dbf.\n");
- qdio_unregister_dbf_views();
- return -ENOMEM;
-}
-
-static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
-{
- return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
-}
-
-static void qdio_mempool_free(void *element, void *size)
-{
- free_page((unsigned long) element);
-}
-
-static int __init
-init_QDIO(void)
-{
- int res;
- void *ptr;
-
- printk("qdio: loading %s\n",version);
-
- res=qdio_get_qdio_memory();
- if (res)
- return res;
-
- qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
- 256, 0, NULL);
- if (!qdio_q_cache) {
- qdio_release_qdio_memory();
- return -ENOMEM;
- }
-
- res = qdio_register_dbf_views();
- if (res) {
- kmem_cache_destroy(qdio_q_cache);
- qdio_release_qdio_memory();
- return res;
- }
-
- QDIO_DBF_TEXT0(0,setup,"initQDIO");
- res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-
- memset((void*)&perf_stats,0,sizeof(perf_stats));
- QDIO_DBF_TEXT0(0,setup,"perfstat");
- ptr=&perf_stats;
- QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
-
- qdio_add_procfs_entry();
-
- qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
- qdio_mempool_alloc,
- qdio_mempool_free, NULL);
-
- isc_register(QDIO_AIRQ_ISC);
- if (tiqdio_check_chsc_availability())
- QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
-
- tiqdio_register_thinints();
-
- return 0;
- }
-
-static void __exit
-cleanup_QDIO(void)
-{
- tiqdio_unregister_thinints();
- isc_unregister(QDIO_AIRQ_ISC);
- qdio_remove_procfs_entry();
- qdio_release_qdio_memory();
- qdio_unregister_dbf_views();
- mempool_destroy(qdio_mempool_scssc);
- kmem_cache_destroy(qdio_q_cache);
- bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
- printk("qdio: %s: module removed\n",version);
-}
-
-module_init(init_QDIO);
-module_exit(cleanup_QDIO);
-
-EXPORT_SYMBOL(qdio_allocate);
-EXPORT_SYMBOL(qdio_establish);
-EXPORT_SYMBOL(qdio_initialize);
-EXPORT_SYMBOL(qdio_activate);
-EXPORT_SYMBOL(do_QDIO);
-EXPORT_SYMBOL(qdio_shutdown);
-EXPORT_SYMBOL(qdio_free);
-EXPORT_SYMBOL(qdio_cleanup);
-EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7656081a24d2..c1a70985abfa 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,66 +1,20 @@
+/*
+ * linux/drivers/s390/cio/qdio.h
+ *
+ * Copyright 2000,2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
#include <asm/page.h>
-#include <asm/isc.h>
#include <asm/schid.h>
+#include "chsc.h"
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_VERBOSE_LEVEL 9
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_VERBOSE_LEVEL 5
-#endif /* CONFIG_QDIO_DEBUG */
-#define QDIO_USE_PROCESSING_STATE
-
-#define QDIO_MINIMAL_BH_RELIEF_TIME 16
-#define QDIO_TIMER_POLL_VALUE 1
-#define IQDIO_TIMER_POLL_VALUE 1
-
-/*
- * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- we
- * never know whether we'll get initiative again, e.g. to give the
- * transmit skb's back to the stack, yet the stack may be waiting for
- * them... therefore we define 4 as the threshold to start polling (which
- * will stop as soon as the asynchronous queue catches up).
- * Btw, this only applies to the asynchronous HiperSockets queue.
- */
-#define IQDIO_FILL_LEVEL_TO_POLL 4
-
-#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
-#define TIQDIO_DELAY_TARGET 0
-#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
-#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
-#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
-#define IQDIO_GLOBAL_LAPS_INT 1 /* don't use the global summary */
-#define IQDIO_LOCAL_LAPS 4
-#define IQDIO_LOCAL_LAPS_INT 1
-#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
-/*#define IQDIO_IQDC_INT_PARM 0x1234*/
-
-#define QDIO_Q_LAPS 5
-
-#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
-
-#define L2_CACHELINE_SIZE 256
-#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
-
-#define QDIO_PERF "qdio_perf"
-
-/* must be a power of 2 */
-/*#define QDIO_STATS_NUMBER 4
-
-#define QDIO_STATS_CLASSES 2
-#define QDIO_STATS_COUNT_NEEDED 2*/
-
-#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
-					     giving up even though the queue's
-					     use_count has not dropped to 0 */
-
-#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
-#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
-#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
-#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
-#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
+#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
+#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
+#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
@@ -72,565 +26,352 @@ enum qdio_irq_states {
NR_QDIO_IRQ_STATES,
};
-/* used as intparm in do_IO: */
-#define QDIO_DOING_SENSEID 0
-#define QDIO_DOING_ESTABLISH 1
-#define QDIO_DOING_ACTIVATE 2
-#define QDIO_DOING_CLEANUP 3
-
-/************************* DEBUG FACILITY STUFF *********************/
-
-#define QDIO_DBF_HEX(ex,name,level,addr,len) \
- do { \
- if (ex) \
- debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
- else \
- debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
- } while (0)
-#define QDIO_DBF_TEXT(ex,name,level,text) \
- do { \
- if (ex) \
- debug_text_exception(qdio_dbf_##name,level,text); \
- else \
- debug_text_event(qdio_dbf_##name,level,text); \
- } while (0)
-
-
-#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
-#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
-#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
-#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
-#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
-#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
-#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
-#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
-#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
-#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
-#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
-#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
-#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
-#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_SETUP_NAME "qdio_setup"
-#define QDIO_DBF_SETUP_LEN 8
-#define QDIO_DBF_SETUP_PAGES 4
-#define QDIO_DBF_SETUP_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_SETUP_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_SETUP_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
-#define QDIO_DBF_SBAL_LEN 256
-#define QDIO_DBF_SBAL_PAGES 4
-#define QDIO_DBF_SBAL_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_SBAL_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_SBAL_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_TRACE_NAME "qdio_trace"
-#define QDIO_DBF_TRACE_LEN 8
-#define QDIO_DBF_TRACE_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_TRACE_PAGES 16
-#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_TRACE_PAGES 4
-#define QDIO_DBF_TRACE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_SENSE_NAME "qdio_sense"
-#define QDIO_DBF_SENSE_LEN 64
-#define QDIO_DBF_SENSE_PAGES 2
-#define QDIO_DBF_SENSE_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_SENSE_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_SENSE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
-
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
-
-#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
-#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
-#define QDIO_DBF_SLSB_OUT_PAGES 256
-#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
-#define QDIO_DBF_SLSB_OUT_LEVEL 6
-
-#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
-#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
-#define QDIO_DBF_SLSB_IN_PAGES 256
-#define QDIO_DBF_SLSB_IN_NR_AREAS 1
-#define QDIO_DBF_SLSB_IN_LEVEL 6
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_PRINTK_HEADER QDIO_NAME ": "
-
-#if QDIO_VERBOSE_LEVEL>8
-#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_STUPID(x...) do { } while (0)
-#endif
+/* used as intparm in do_IO */
+#define QDIO_DOING_ESTABLISH 1
+#define QDIO_DOING_ACTIVATE 2
+#define QDIO_DOING_CLEANUP 3
+
+#define SLSB_STATE_NOT_INIT 0x0
+#define SLSB_STATE_EMPTY 0x1
+#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_HALTED 0xe
+#define SLSB_STATE_ERROR 0xf
+#define SLSB_TYPE_INPUT 0x0
+#define SLSB_TYPE_OUTPUT 0x20
+#define SLSB_OWNER_PROG 0x80
+#define SLSB_OWNER_CU 0x40
+
+#define SLSB_P_INPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
+#define SLSB_P_INPUT_ACK \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY \
+ (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
+#define SLSB_P_INPUT_PRIMED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
+#define SLSB_P_INPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
+#define SLSB_P_INPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_CU_OUTPUT_PRIMED \
+ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
+#define SLSB_P_OUTPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP 0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
-#if QDIO_VERBOSE_LEVEL>7
-#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_ALL(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>6
-#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_INFO(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>5
-#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_WARN(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>4
-#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_ERR(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>3
-#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_CRIT(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>2
-#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_ALERT(x...) do { } while (0)
-#endif
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY 0x80
+#define CHSC_FLAG_VALIDITY 0x40
+
+/* qdio adapter-characteristics-1 flag */
+#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
+#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
+#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
+#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
+#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
+#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
+#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
-#if QDIO_VERBOSE_LEVEL>1
-#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_EMERG(x...) do { } while (0)
-#endif
-
-#define QDIO_HEXDUMP16(importance,header,ptr) \
-QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x\n",*(((char*)ptr)), \
- *(((char*)ptr)+1),*(((char*)ptr)+2), \
- *(((char*)ptr)+3),*(((char*)ptr)+4), \
- *(((char*)ptr)+5),*(((char*)ptr)+6), \
- *(((char*)ptr)+7),*(((char*)ptr)+8), \
- *(((char*)ptr)+9),*(((char*)ptr)+10), \
- *(((char*)ptr)+11),*(((char*)ptr)+12), \
- *(((char*)ptr)+13),*(((char*)ptr)+14), \
- *(((char*)ptr)+15)); \
-QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)+16),*(((char*)ptr)+17), \
- *(((char*)ptr)+18),*(((char*)ptr)+19), \
- *(((char*)ptr)+20),*(((char*)ptr)+21), \
- *(((char*)ptr)+22),*(((char*)ptr)+23), \
- *(((char*)ptr)+24),*(((char*)ptr)+25), \
- *(((char*)ptr)+26),*(((char*)ptr)+27), \
- *(((char*)ptr)+28),*(((char*)ptr)+29), \
- *(((char*)ptr)+30),*(((char*)ptr)+31));
-
-/****************** END OF DEBUG FACILITY STUFF *********************/
+#ifdef CONFIG_64BIT
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ int *start, int *count)
+{
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _token asm ("1") = token;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
-/*
- * Some instructions as assembly
- */
+ asm volatile(
+ " .insn rsy,0xeb000000008A,%1,0,0(%2)"
+ : "+d" (_ccq), "+d" (_queuestart)
+ : "d" ((unsigned long)state), "d" (_token)
+ : "memory", "cc");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
-static inline int
-do_sqbs(unsigned long sch, unsigned char state, int queue,
- unsigned int *start, unsigned int *count)
-{
-#ifdef CONFIG_64BIT
- register unsigned long _ccq asm ("0") = *count;
- register unsigned long _sch asm ("1") = sch;
- unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
-
- asm volatile(
- " .insn rsy,0xeb000000008A,%1,0,0(%2)"
- : "+d" (_ccq), "+d" (_queuestart)
- : "d" ((unsigned long)state), "d" (_sch)
- : "memory", "cc");
- *count = _ccq & 0xff;
- *start = _queuestart & 0xff;
-
- return (_ccq >> 32) & 0xff;
-#else
- return 0;
-#endif
+ return (_ccq >> 32) & 0xff;
}
-static inline int
-do_eqbs(unsigned long sch, unsigned char *state, int queue,
- unsigned int *start, unsigned int *count)
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+ int *start, int *count)
{
-#ifdef CONFIG_64BIT
register unsigned long _ccq asm ("0") = *count;
- register unsigned long _sch asm ("1") = sch;
+ register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = 0;
asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
- : "d" (_sch)
- : "memory", "cc" );
+ : "d" (_token)
+ : "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;
return (_ccq >> 32) & 0xff;
-#else
- return 0;
-#endif
-}
-
-
-static inline int
-do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
-{
- register unsigned long reg0 asm ("0") = 2;
- register struct subchannel_id reg1 asm ("1") = schid;
- register unsigned long reg2 asm ("2") = mask1;
- register unsigned long reg3 asm ("3") = mask2;
- int cc;
-
- asm volatile(
- " siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
- return cc;
-}
-
-static inline int
-do_siga_input(struct subchannel_id schid, unsigned int mask)
-{
- register unsigned long reg0 asm ("0") = 1;
- register struct subchannel_id reg1 asm ("1") = schid;
- register unsigned long reg2 asm ("2") = mask;
- int cc;
-
- asm volatile(
- " siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
- return cc;
-}
-
-static inline int
-do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
- unsigned int fc)
-{
- register unsigned long __fc asm("0") = fc;
- register unsigned long __schid asm("1") = schid;
- register unsigned long __mask asm("2") = mask;
- int cc;
-
- asm volatile(
- " siga 0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
- : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
- : "cc", "memory");
- (*bb) = ((unsigned int) __fc) >> 31;
- return cc;
-}
-
-static inline unsigned long
-do_clear_global_summary(void)
-{
- register unsigned long __fn asm("1") = 3;
- register unsigned long __tmp asm("2");
- register unsigned long __time asm("3");
-
- asm volatile(
- " .insn rre,0xb2650000,2,0"
- : "+d" (__fn), "=d" (__tmp), "=d" (__time));
- return __time;
}
-
-/*
- * QDIO device commands returned by extended Sense-ID
- */
-#define DEFAULT_ESTABLISH_QS_CMD 0x1b
-#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
-#define DEFAULT_ACTIVATE_QS_CMD 0x1f
-#define DEFAULT_ACTIVATE_QS_COUNT 0
-
-/*
- * additional CIWs returned by extended Sense-ID
- */
-#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
-#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
+#else
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ int *start, int *count) { return 0; }
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+ int *start, int *count) { return 0; }
+#endif /* CONFIG_64BIT */
-#define QDIO_CHSC_RESPONSE_CODE_OK 1
-/* flags for st qdio sch data */
-#define CHSC_FLAG_QDIO_CAPABILITY 0x80
-#define CHSC_FLAG_VALIDITY 0x40
+struct qdio_irq;
-#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
-#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
-#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
-#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
-#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
+struct siga_flag {
+ u8 input:1;
+ u8 output:1;
+ u8 sync:1;
+ u8 no_sync_ti:1;
+ u8 no_sync_out_ti:1;
+ u8 no_sync_out_pci:1;
+ u8:2;
+} __attribute__ ((packed));
-struct qdio_chsc_ssqd {
+struct chsc_ssqd_area {
struct chsc_header request;
- u16 reserved1:10;
- u16 ssid:2;
- u16 fmt:4;
+ u16:10;
+ u8 ssid:2;
+ u8 fmt:4;
u16 first_sch;
- u16 reserved2;
+ u16:16;
u16 last_sch;
- u32 reserved3;
+ u32:32;
struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 reserved5;
- u16 sch;
- u8 qfmt;
- u8 parm;
- u8 qdioac1;
- u8 sch_class;
- u8 pct;
- u8 icnt;
- u8 reserved7;
- u8 ocnt;
- u8 reserved8;
- u8 mbccnt;
- u16 qdioac2;
- u64 sch_token;
-};
+ u32:32;
+ struct qdio_ssqd_desc qdio_ssqd;
+} __attribute__ ((packed));
-struct qdio_perf_stats {
-#ifdef CONFIG_64BIT
- atomic64_t tl_runs;
- atomic64_t outbound_tl_runs;
- atomic64_t outbound_tl_runs_resched;
- atomic64_t inbound_tl_runs;
- atomic64_t inbound_tl_runs_resched;
- atomic64_t inbound_thin_tl_runs;
- atomic64_t inbound_thin_tl_runs_resched;
-
- atomic64_t siga_outs;
- atomic64_t siga_ins;
- atomic64_t siga_syncs;
- atomic64_t pcis;
- atomic64_t thinints;
- atomic64_t fast_reqs;
-
- atomic64_t outbound_cnt;
- atomic64_t inbound_cnt;
-#else /* CONFIG_64BIT */
- atomic_t tl_runs;
- atomic_t outbound_tl_runs;
- atomic_t outbound_tl_runs_resched;
- atomic_t inbound_tl_runs;
- atomic_t inbound_tl_runs_resched;
- atomic_t inbound_thin_tl_runs;
- atomic_t inbound_thin_tl_runs_resched;
-
- atomic_t siga_outs;
- atomic_t siga_ins;
- atomic_t siga_syncs;
- atomic_t pcis;
- atomic_t thinints;
- atomic_t fast_reqs;
-
- atomic_t outbound_cnt;
- atomic_t inbound_cnt;
-#endif /* CONFIG_64BIT */
+struct scssc_area {
+ struct chsc_header request;
+ u16 operation_code;
+ u16:16;
+ u32:32;
+ u32:32;
+ u64 summary_indicator_addr;
+ u64 subchannel_indicator_addr;
+ u32 ks:4;
+ u32 kc:4;
+ u32:21;
+ u32 isc:3;
+ u32 word_with_d_bit;
+ u32:32;
+ struct subchannel_id schid;
+ u32 reserved[1004];
+ struct chsc_header response;
+ u32:32;
+} __attribute__ ((packed));
+
+struct qdio_input_q {
+ /* input buffer acknowledgement flag */
+ int polling;
+
+ /* last time of noticing incoming data */
+ u64 timestamp;
+
+ /* lock for clearing the acknowledgement */
+ spinlock_t lock;
};
-/* unlikely as the later the better */
-#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
-#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
- qdio_siga_sync(q,~0U,~0U)
-#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
- qdio_siga_sync(q,~0U,0)
+struct qdio_output_q {
+	/* failed siga-w attempts */
+ atomic_t busy_siga_counter;
-#define NOW qdio_get_micros()
-#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
-#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
-#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
-#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
+ /* start time of busy condition */
+ u64 timestamp;
-#define MY_MODULE_STRING(x) #x
+ /* PCIs are enabled for the queue */
+ int pci_out_enabled;
-#ifdef CONFIG_64BIT
-#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
-#else /* CONFIG_64BIT */
-#define QDIO_GET_ADDR(x) ((__u32)(long)x)
-#endif /* CONFIG_64BIT */
+ /* timer to check for more outbound work */
+ struct timer_list timer;
+};
struct qdio_q {
- volatile struct slsb slsb;
+ struct slsb slsb;
+ union {
+ struct qdio_input_q in;
+ struct qdio_output_q out;
+ } u;
- char unused[QDIO_MAX_BUFFERS_PER_Q];
+ /* queue number */
+ int nr;
- __u32 * dev_st_chg_ind;
+ /* bitmask of queue number */
+ int mask;
+ /* input or output queue */
int is_input_q;
- struct subchannel_id schid;
- struct ccw_device *cdev;
-
- unsigned int is_iqdio_q;
- unsigned int is_thinint_q;
- /* bit 0 means queue 0, bit 1 means queue 1, ... */
- unsigned int mask;
- unsigned int q_no;
+ /* list of thinint input queues */
+ struct list_head entry;
+ /* upper-layer program handler */
qdio_handler_t (*handler);
- /* points to the next buffer to be checked for having
- * been processed by the card (outbound)
- * or to the next buffer the program should check for (inbound) */
- volatile int first_to_check;
- /* and the last time it was: */
- volatile int last_move_ftc;
+ /*
+ * inbound: next buffer the program should check for
+ * outbound: next buffer to check for having been processed
+ * by the card
+ */
+ int first_to_check;
- atomic_t number_of_buffers_used;
- atomic_t polling;
+ /* first_to_check of the last time */
+ int last_move_ftc;
- unsigned int siga_in;
- unsigned int siga_out;
- unsigned int siga_sync;
- unsigned int siga_sync_done_on_thinints;
- unsigned int siga_sync_done_on_outb_tis;
- unsigned int hydra_gives_outbound_pcis;
+ /* beginning position for calling the program */
+ int first_to_kick;
- /* used to save beginning position when calling dd_handlers */
- int first_element_to_kick;
+ /* number of buffers in use by the adapter */
+ atomic_t nr_buf_used;
- atomic_t use_count;
- atomic_t is_in_shutdown;
-
- void *irq_ptr;
-
- struct timer_list timer;
-#ifdef QDIO_USE_TIMERS_FOR_POLLING
- atomic_t timer_already_set;
- spinlock_t timer_lock;
-#else /* QDIO_USE_TIMERS_FOR_POLLING */
+ struct qdio_irq *irq_ptr;
struct tasklet_struct tasklet;
-#endif /* QDIO_USE_TIMERS_FOR_POLLING */
-
- enum qdio_irq_states state;
-
- /* used to store the error condition during a data transfer */
+ /* error condition during a data transfer */
unsigned int qdio_error;
- unsigned int siga_error;
- unsigned int error_status_flags;
-
- /* list of interesting queues */
- volatile struct qdio_q *list_next;
- volatile struct qdio_q *list_prev;
struct sl *sl;
- volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];
-
- struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
-
- unsigned long int_parm;
-
- /*struct {
- int in_bh_check_limit;
- int threshold;
- } threshold_classes[QDIO_STATS_CLASSES];*/
-
- struct {
- /* inbound: the time to stop polling
- outbound: the time to kick peer */
- int threshold; /* the real value */
-
- /* outbound: last time of do_QDIO
- inbound: last time of noticing incoming data */
- /*__u64 last_transfer_times[QDIO_STATS_NUMBER];
- int last_transfer_index; */
-
- __u64 last_transfer_time;
- __u64 busy_start;
- } timing;
- atomic_t busy_siga_counter;
- unsigned int queue_type;
- unsigned int is_pci_out;
-
- /* leave this member at the end. won't be cleared in qdio_fill_qs */
- struct slib *slib; /* a page is allocated under this pointer,
- sl points into this page, offset PAGE_SIZE/2
- (after slib) */
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+
+ /*
+ * Warning: Leave this member at the end so it won't be cleared in
+ * qdio_fill_qs. A page is allocated under this pointer and used for
+ * slib and sl. slib is 2048 bytes big and sl points to offset
+ * PAGE_SIZE / 2.
+ */
+ struct slib *slib;
} __attribute__ ((aligned(256)));
struct qdio_irq {
- __u32 * volatile dev_st_chg_ind;
+ struct qib qib;
+ u32 *dsci; /* address of device state change indicator */
+ struct ccw_device *cdev;
unsigned long int_parm;
struct subchannel_id schid;
-
- unsigned int is_iqdio_irq;
- unsigned int is_thinint_irq;
- unsigned int hydra_gives_outbound_pcis;
- unsigned int sync_done_on_outb_pcis;
-
- /* QEBSM facility */
- unsigned int is_qebsm;
- unsigned long sch_token;
+ unsigned long sch_token; /* QEBSM facility */
enum qdio_irq_states state;
- unsigned int no_input_qs;
- unsigned int no_output_qs;
+ struct siga_flag siga_flag; /* siga sync information from qdioac */
- unsigned char qdioac;
+ int nr_input_qs;
+ int nr_output_qs;
struct ccw1 ccw;
-
struct ciw equeue;
struct ciw aqueue;
- struct qib qib;
-
- void (*original_int_handler) (struct ccw_device *,
- unsigned long, struct irb *);
+ struct qdio_ssqd_desc ssqd_desc;
+
+ void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
- /* leave these four members together at the end. won't be cleared in qdio_fill_irq */
+ /*
+ * Warning: Leave these members together at the end so they won't be
+ * cleared in qdio_setup_irq.
+ */
struct qdr *qdr;
+ unsigned long chsc_page;
+
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
- struct semaphore setting_up_sema;
+
+ struct mutex setup_mutex;
};
-#endif
+
+/* helper functions */
+#define queue_type(q) q->irq_ptr->qib.qfmt
+
+#define is_thinint_irq(irq) \
+ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
+ css_general_characteristics.aif_osa)
+
+/* the highest iqdio queue is used for multicast */
+static inline int multicast_outbound(struct qdio_q *q)
+{
+ return (q->irq_ptr->nr_output_qs > 1) &&
+ (q->nr == q->irq_ptr->nr_output_qs - 1);
+}
+
+static inline unsigned long long get_usecs(void)
+{
+ return monotonic_clock() >> 12;
+}
+
+#define pci_out_supported(q) \
+ (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
+
+#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
+#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
+#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
+#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
+#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
+#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
+
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0, q = irq_ptr->input_qs[0]; \
+ i < irq_ptr->nr_input_qs; \
+ q = irq_ptr->input_qs[++i])
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0, q = irq_ptr->output_qs[0]; \
+ i < irq_ptr->nr_output_qs; \
+ q = irq_ptr->output_qs[++i])
+
+#define prev_buf(bufnr) \
+ ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
+#define next_buf(bufnr) \
+ ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) \
+ ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
+
+/* prototypes for thin interrupt */
+void qdio_sync_after_thinint(struct qdio_q *q);
+int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
+void qdio_check_outbound_after_thinint(struct qdio_q *q);
+int qdio_inbound_q_moved(struct qdio_q *q);
+void qdio_kick_inbound_handler(struct qdio_q *q);
+void qdio_stop_polling(struct qdio_q *q);
+int qdio_siga_sync_q(struct qdio_q *q);
+
+void qdio_setup_thinint(struct qdio_irq *irq_ptr);
+int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_inbound_processing(unsigned long q);
+int tiqdio_allocate_memory(void);
+void tiqdio_free_memory(void);
+int tiqdio_register_thinints(void);
+void tiqdio_unregister_thinints(void);
+
+/* prototypes for setup */
+void qdio_inbound_processing(unsigned long data);
+void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_timer(unsigned long data);
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb);
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
+ int nr_output_qs);
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_irq(struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev);
+void qdio_release_memory(struct qdio_irq *irq_ptr);
+int qdio_setup_init(void);
+void qdio_setup_exit(void);
+
+#endif /* _CIO_QDIO_H */
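The header above packs each SLSB entry into a single byte as owner | type | state and steps through the buffer ring with mask arithmetic (prev_buf/next_buf/add_buf). The standalone C sketch below only illustrates those two ideas; it copies the relevant constants locally and assumes QDIO_MAX_BUFFERS_PER_Q is 128 (mask 127), which matches the 128-buffer loops elsewhere in this patch but is not taken verbatim from it. The names BUFFERS_PER_Q and BUFFERS_MASK are local to the sketch.

/* Illustration only: local copies of the SLSB encoding and ring arithmetic
 * mirroring qdio.h above. Assumes a 128-entry ring (mask 127). */
#include <stdio.h>

#define SLSB_STATE_PRIMED	0x2
#define SLSB_TYPE_OUTPUT	0x20
#define SLSB_OWNER_CU		0x40
#define SLSB_CU_OUTPUT_PRIMED	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)

#define BUFFERS_PER_Q	128			/* assumed QDIO_MAX_BUFFERS_PER_Q */
#define BUFFERS_MASK	(BUFFERS_PER_Q - 1)	/* assumed QDIO_MAX_BUFFERS_MASK */

#define next_buf(bufnr)		(((bufnr) + 1) & BUFFERS_MASK)
#define prev_buf(bufnr)		(((bufnr) + BUFFERS_MASK) & BUFFERS_MASK)
#define add_buf(bufnr, inc)	(((bufnr) + (inc)) & BUFFERS_MASK)

int main(void)
{
	/* 0x40 | 0x20 | 0x2 == 0x62, matching the comment in qdio.h */
	printf("SLSB_CU_OUTPUT_PRIMED = 0x%02x\n", SLSB_CU_OUTPUT_PRIMED);

	/* the ring wraps: one step past buffer 127 is buffer 0 again */
	printf("next_buf(127)    = %d\n", next_buf(127));	/* 0 */
	printf("prev_buf(0)      = %d\n", prev_buf(0));		/* 127 */
	printf("add_buf(120, 16) = %d\n", add_buf(120, 16));	/* 8 */
	return 0;
}

Compiled on its own, the sketch prints 0x62 for SLSB_CU_OUTPUT_PRIMED and shows why the frontier arithmetic in the new driver never needs explicit wrap-around checks.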
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000000..337aa3087a78
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,240 @@
+/*
+ * drivers/s390/cio/qdio_debug.c
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/qdio.h>
+#include <asm/debug.h>
+#include "qdio_debug.h"
+#include "qdio.h"
+
+debug_info_t *qdio_dbf_setup;
+debug_info_t *qdio_dbf_trace;
+
+static struct dentry *debugfs_root;
+#define MAX_DEBUGFS_QUEUES 32
+static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
+static DEFINE_MUTEX(debugfs_mutex);
+
+void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
+{
+ char dbf_text[20];
+
+ sprintf(dbf_text, "qfmt:%x", init_data->q_format);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
+ sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
+ QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
+ QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
+ sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
+ QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
+ QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
+ QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
+ QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
+ QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
+}
+
+static void qdio_unregister_dbf_views(void)
+{
+ if (qdio_dbf_setup)
+ debug_unregister(qdio_dbf_setup);
+ if (qdio_dbf_trace)
+ debug_unregister(qdio_dbf_trace);
+}
+
+static int qdio_register_dbf_views(void)
+{
+ qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
+ QDIO_DBF_SETUP_NR_AREAS,
+ QDIO_DBF_SETUP_LEN);
+ if (!qdio_dbf_setup)
+ goto oom;
+ debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
+
+ qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
+ QDIO_DBF_TRACE_NR_AREAS,
+ QDIO_DBF_TRACE_LEN);
+ if (!qdio_dbf_trace)
+ goto oom;
+ debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
+ return 0;
+oom:
+ qdio_unregister_dbf_views();
+ return -ENOMEM;
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+ unsigned char state;
+ struct qdio_q *q = m->private;
+ int i;
+
+ if (!q)
+ return 0;
+
+ seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
+ seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
+ seq_printf(m, "ftc: %d\n", q->first_to_check);
+ seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
+ seq_printf(m, "polling: %d\n", q->u.in.polling);
+ seq_printf(m, "slsb buffer states:\n");
+
+ qdio_siga_sync_q(q);
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+ get_buf_state(q, i, &state);
+ switch (state) {
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_OUTPUT_NOT_INIT:
+ seq_printf(m, "N");
+ break;
+ case SLSB_P_INPUT_PRIMED:
+ case SLSB_CU_OUTPUT_PRIMED:
+ seq_printf(m, "+");
+ break;
+ case SLSB_P_INPUT_ACK:
+ seq_printf(m, "A");
+ break;
+ case SLSB_P_INPUT_ERROR:
+ case SLSB_P_OUTPUT_ERROR:
+ seq_printf(m, "x");
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_OUTPUT_EMPTY:
+ seq_printf(m, "-");
+ break;
+ case SLSB_P_INPUT_HALTED:
+ case SLSB_P_OUTPUT_HALTED:
+ seq_printf(m, ".");
+ break;
+ default:
+ seq_printf(m, "?");
+ }
+ if (i == 63)
+ seq_printf(m, "\n");
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_q *q = seq->private;
+
+ if (!q)
+ return 0;
+
+ if (q->is_input_q)
+ xchg(q->irq_ptr->dsci, 1);
+ local_bh_disable();
+ tasklet_schedule(&q->tasklet);
+ local_bh_enable();
+ return count;
+}
+
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qstat_show,
+ filp->f_path.dentry->d_inode->i_private);
+}
+
+static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
+{
+ memset(name, 0, sizeof(name));
+ sprintf(name, "%s", cdev->dev.bus_id);
+ if (q->is_input_q)
+ sprintf(name + strlen(name), "_input");
+ else
+ sprintf(name + strlen(name), "_output");
+ sprintf(name + strlen(name), "_%d", q->nr);
+}
+
+static void remove_debugfs_entry(struct qdio_q *q)
+{
+ int i;
+
+ for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
+ if (!debugfs_queues[i])
+ continue;
+ if (debugfs_queues[i]->d_inode->i_private == q) {
+ debugfs_remove(debugfs_queues[i]);
+ debugfs_queues[i] = NULL;
+ }
+ }
+}
+
+static struct file_operations debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qstat_seq_open,
+ .read = seq_read,
+ .write = qstat_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+{
+ int i = 0;
+ char name[40];
+
+ while (debugfs_queues[i] != NULL) {
+ i++;
+ if (i >= MAX_DEBUGFS_QUEUES)
+ return;
+ }
+ get_queue_name(q, cdev, name);
+ debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+ debugfs_root, q, &debugfs_fops);
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+ struct qdio_q *q;
+ int i;
+
+ mutex_lock(&debugfs_mutex);
+ for_each_input_queue(irq_ptr, q, i)
+ setup_debugfs_entry(q, cdev);
+ for_each_output_queue(irq_ptr, q, i)
+ setup_debugfs_entry(q, cdev);
+ mutex_unlock(&debugfs_mutex);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+ struct qdio_q *q;
+ int i;
+
+ mutex_lock(&debugfs_mutex);
+ for_each_input_queue(irq_ptr, q, i)
+ remove_debugfs_entry(q);
+ for_each_output_queue(irq_ptr, q, i)
+ remove_debugfs_entry(q);
+ mutex_unlock(&debugfs_mutex);
+}
+
+int __init qdio_debug_init(void)
+{
+ debugfs_root = debugfs_create_dir("qdio_queues", NULL);
+ return qdio_register_dbf_views();
+}
+
+void qdio_debug_exit(void)
+{
+ debugfs_remove(debugfs_root);
+ qdio_unregister_dbf_views();
+}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000000..8484b83698e1
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/s390/cio/qdio_debug.h
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_DEBUG_H
+#define QDIO_DEBUG_H
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include "qdio.h"
+
+#define QDIO_DBF_HEX(ex, name, level, addr, len) \
+ do { \
+ if (ex) \
+ debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
+ else \
+ debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
+ } while (0)
+#define QDIO_DBF_TEXT(ex, name, level, text) \
+ do { \
+ if (ex) \
+ debug_text_exception(qdio_dbf_##name, level, text); \
+ else \
+ debug_text_event(qdio_dbf_##name, level, text); \
+ } while (0)
+
+#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
+#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
+#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
+
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
+#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
+#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
+#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
+#else
+#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
+#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
+#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
+#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
+#endif /* CONFIG_QDIO_DEBUG */
+
+#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
+#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
+#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
+
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
+#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
+#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
+#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
+#else
+#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
+#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
+#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
+#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
+#endif /* CONFIG_QDIO_DEBUG */
+
+/* s390dbf views */
+#define QDIO_DBF_SETUP_LEN 8
+#define QDIO_DBF_SETUP_PAGES 4
+#define QDIO_DBF_SETUP_NR_AREAS 1
+
+#define QDIO_DBF_TRACE_LEN 8
+#define QDIO_DBF_TRACE_NR_AREAS 2
+
+#ifdef CONFIG_QDIO_DEBUG
+#define QDIO_DBF_TRACE_PAGES 16
+#define QDIO_DBF_SETUP_LEVEL 6
+#define QDIO_DBF_TRACE_LEVEL 4
+#else /* !CONFIG_QDIO_DEBUG */
+#define QDIO_DBF_TRACE_PAGES 4
+#define QDIO_DBF_SETUP_LEVEL 2
+#define QDIO_DBF_TRACE_LEVEL 2
+#endif /* CONFIG_QDIO_DEBUG */
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_trace;
+
+void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
+void debug_print_bstat(struct qdio_q *q);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+#endif
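qdio_debug.h keeps trace levels 0-2 always compiled in and turns levels 3-6 into empty statements unless CONFIG_QDIO_DEBUG is set. The standalone sketch below mimics only that preprocessor pattern, together with the sprintf-into-a-short-buffer convention used throughout the new driver code; the printf backend (dbf_text_event), the DBF_TEXT* names and the QDIO_DEBUG_SKETCH switch are stand-ins invented for illustration, not the real s390 debug facility API or CONFIG_QDIO_DEBUG.

/* Standalone illustration of the compile-time level gating used by
 * qdio_debug.h. The backend is a printf mock, not the s390 debug
 * facility; set QDIO_DEBUG_SKETCH to 0 to see level 5 compile away. */
#include <stdio.h>

#define QDIO_DEBUG_SKETCH 1	/* stands in for CONFIG_QDIO_DEBUG */

static void dbf_text_event(int level, const char *text)
{
	printf("dbf level %d: %s\n", level, text);
}

#define DBF_TEXT(level, text)	dbf_text_event(level, text)

/* levels 0-2 are always compiled in */
#define DBF_TEXT2(text)		DBF_TEXT(2, text)

#if QDIO_DEBUG_SKETCH
#define DBF_TEXT5(text)		DBF_TEXT(5, text)
#else
#define DBF_TEXT5(text)		do {} while (0)
#endif

int main(void)
{
	char dbf_text[15];

	/* same pattern the driver uses: format into a short buffer, then trace */
	snprintf(dbf_text, sizeof(dbf_text), "qfmt:%x", 2);
	DBF_TEXT2(dbf_text);
	DBF_TEXT5("eqAGAIN");	/* dropped entirely when QDIO_DEBUG_SKETCH is 0 */
	return 0;
}

With the switch off, the level-5 call compiles to nothing, which is why the hot paths in qdio_main.c can trace freely at levels 4 and 5 without cost on production kernels.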
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000000..d10c73cc1688
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1755 @@
+/*
+ * linux/drivers/s390/cio/qdio_main.c
+ *
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright 2000,2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+#include "qdio_perf.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
+ "Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
+static inline int do_siga_sync(struct subchannel_id schid,
+ unsigned int out_mask, unsigned int in_mask)
+{
+ register unsigned long __fc asm ("0") = 2;
+ register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long out asm ("2") = out_mask;
+ register unsigned long in asm ("3") = in_mask;
+ int cc;
+
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc)
+ : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+ return cc;
+}
+
+static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+{
+ register unsigned long __fc asm ("0") = 1;
+ register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long __mask asm ("2") = mask;
+ int cc;
+
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc)
+ : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+ return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ *
+ * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Note: For IQDC unicast queues only the highest priority queue is processed.
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+ u32 *bb, unsigned int fc)
+{
+ register unsigned long __fc asm("0") = fc;
+ register unsigned long __schid asm("1") = schid;
+ register unsigned long __mask asm("2") = mask;
+ int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+
+ asm volatile(
+ " siga 0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+ : : "cc", "memory");
+ *bb = ((unsigned int) __fc) >> 31;
+ return cc;
+}
+
+static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+ char dbf_text[15];
+
+ /* all done or next buffer state different */
+ if (ccq == 0 || ccq == 32)
+ return 0;
+ /* not all buffers processed */
+ if (ccq == 96 || ccq == 97)
+ return 1;
+ /* notify devices immediately */
+ sprintf(dbf_text, "%d", ccq);
+ QDIO_DBF_TEXT2(1, trace, dbf_text);
+ return -EIO;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state is different from the last buffers state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ int start, int count)
+{
+ unsigned int ccq = 0;
+ int tmp_count = count, tmp_start = start;
+ int nr = q->nr;
+ int rc;
+ char dbf_text[15];
+
+ BUG_ON(!q->irq_ptr->sch_token);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+ rc = qdio_check_ccq(q, ccq);
+
+ /* At least one buffer was processed, return and extract the remaining
+ * buffers later.
+ */
+ if ((ccq == 96) && (count != tmp_count))
+ return (count - tmp_count);
+ if (rc == 1) {
+ QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
+ goto again;
+ }
+
+ if (rc < 0) {
+ QDIO_DBF_TEXT2(1, trace, "eqberr");
+ sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
+ QDIO_DBF_TEXT2(1, trace, dbf_text);
+ q->handler(q->irq_ptr->cdev,
+ QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+ 0, -1, -1, q->irq_ptr->int_parm);
+ return 0;
+ }
+ return count - tmp_count;
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Retries until the specified count of buffer states is set or an
+ * error occurs.
+ */
+static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+ int count)
+{
+ unsigned int ccq = 0;
+ int tmp_count = count, tmp_start = start;
+ int nr = q->nr;
+ int rc;
+ char dbf_text[15];
+
+ BUG_ON(!q->irq_ptr->sch_token);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+ rc = qdio_check_ccq(q, ccq);
+ if (rc == 1) {
+ QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
+ goto again;
+ }
+ if (rc < 0) {
+ QDIO_DBF_TEXT3(1, trace, "sqberr");
+ sprintf(dbf_text, "%2x,%2x", count, tmp_count);
+ QDIO_DBF_TEXT3(1, trace, dbf_text);
+ sprintf(dbf_text, "%d,%d", ccq, nr);
+ QDIO_DBF_TEXT3(1, trace, dbf_text);
+
+ q->handler(q->irq_ptr->cdev,
+ QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+ 0, -1, -1, q->irq_ptr->int_parm);
+ return 0;
+ }
+ WARN_ON(tmp_count);
+ return count - tmp_count;
+}
+
+/* returns number of examined buffers and their common state in *state */
+static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, unsigned int count)
+{
+ unsigned char __state = 0;
+ int i;
+
+ BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+ BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
+ if (is_qebsm(q))
+ return qdio_do_eqbs(q, state, bufnr, count);
+
+ for (i = 0; i < count; i++) {
+ if (!__state)
+ __state = q->slsb.val[bufnr];
+ else if (q->slsb.val[bufnr] != __state)
+ break;
+ bufnr = next_buf(bufnr);
+ }
+ *state = __state;
+ return i;
+}
+
+inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ return get_buf_states(q, bufnr, state, 1);
+}
+
+/* wrap-around safe setting of slsb states, returns number of changed buffers */
+static inline int set_buf_states(struct qdio_q *q, int bufnr,
+ unsigned char state, int count)
+{
+ int i;
+
+ BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+ BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
+ if (is_qebsm(q))
+ return qdio_do_sqbs(q, state, bufnr, count);
+
+ for (i = 0; i < count; i++) {
+ xchg(&q->slsb.val[bufnr], state);
+ bufnr = next_buf(bufnr);
+ }
+ return count;
+}
+
+static inline int set_buf_state(struct qdio_q *q, int bufnr,
+ unsigned char state)
+{
+ return set_buf_states(q, bufnr, state, 1);
+}
+
+/* set slsb states to initial state */
+void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+ for_each_output_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+}
+
+static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+ unsigned int input)
+{
+ int cc;
+
+ if (!need_siga_sync(q))
+ return 0;
+
+ qdio_perf_stat_inc(&perf_stats.siga_sync);
+
+ cc = do_siga_sync(q->irq_ptr->schid, output, input);
+ if (cc) {
+ QDIO_DBF_TEXT4(0, trace, "sigasync");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+ }
+ return cc;
+}
+
+inline int qdio_siga_sync_q(struct qdio_q *q)
+{
+ if (q->is_input_q)
+ return qdio_siga_sync(q, 0, q->mask);
+ else
+ return qdio_siga_sync(q, q->mask, 0);
+}
+
+static inline int qdio_siga_sync_out(struct qdio_q *q)
+{
+ return qdio_siga_sync(q, ~0U, 0);
+}
+
+static inline int qdio_siga_sync_all(struct qdio_q *q)
+{
+ return qdio_siga_sync(q, ~0U, ~0U);
+}
+
+static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+{
+ unsigned int fc = 0;
+ unsigned long schid;
+
+ if (!is_qebsm(q))
+ schid = *((u32 *)&q->irq_ptr->schid);
+ else {
+ schid = q->irq_ptr->sch_token;
+ fc |= 0x80;
+ }
+ return do_siga_output(schid, q->mask, busy_bit, fc);
+}
+
+static int qdio_siga_output(struct qdio_q *q)
+{
+ int cc;
+ u32 busy_bit;
+ u64 start_time = 0;
+
+ QDIO_DBF_TEXT5(0, trace, "sigaout");
+ QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
+
+ qdio_perf_stat_inc(&perf_stats.siga_out);
+again:
+ cc = qdio_do_siga_output(q, &busy_bit);
+ if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
+ if (!start_time)
+ start_time = get_usecs();
+ else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ goto again;
+ }
+
+ if (cc == 2 && busy_bit)
+ cc |= QDIO_ERROR_SIGA_BUSY;
+ if (cc)
+ QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+ return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+ int cc;
+
+ QDIO_DBF_TEXT4(0, trace, "sigain");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+
+ qdio_perf_stat_inc(&perf_stats.siga_in);
+
+ cc = do_siga_input(q->irq_ptr->schid, q->mask);
+ if (cc)
+ QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+ return cc;
+}
+
+/* called from thinint inbound handler */
+void qdio_sync_after_thinint(struct qdio_q *q)
+{
+ if (pci_out_supported(q)) {
+ if (need_siga_sync_thinint(q))
+ qdio_siga_sync_all(q);
+ else if (need_siga_sync_out_thinint(q))
+ qdio_siga_sync_out(q);
+ } else
+ qdio_siga_sync_q(q);
+}
+
+inline void qdio_stop_polling(struct qdio_q *q)
+{
+ spin_lock_bh(&q->u.in.lock);
+ if (!q->u.in.polling) {
+ spin_unlock_bh(&q->u.in.lock);
+ return;
+ }
+ q->u.in.polling = 0;
+ qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+
+ /* show the card that we are not polling anymore */
+ set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
+ spin_unlock_bh(&q->u.in.lock);
+}
+
+static void announce_buffer_error(struct qdio_q *q)
+{
+ char dbf_text[15];
+
+ if (q->is_input_q)
+ QDIO_DBF_TEXT3(1, trace, "inperr");
+ else
+ QDIO_DBF_TEXT3(0, trace, "outperr");
+
+ sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
+ q->sbal[q->first_to_check]->element[14].flags,
+ q->sbal[q->first_to_check]->element[15].flags);
+ QDIO_DBF_TEXT3(1, trace, dbf_text);
+ QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
+
+ q->qdio_error = QDIO_ERROR_SLSB_STATE;
+}
+
+static int get_inbound_buffer_frontier(struct qdio_q *q)
+{
+ int count, stop;
+ unsigned char state;
+
+ /*
+	 * If we are still polling, don't update last_move_ftc; keep the
+	 * previously ACKed buffer there.
+ */
+ if (!q->u.in.polling)
+ q->last_move_ftc = q->first_to_check;
+
+ /*
+ * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
+ * would return 0.
+ */
+ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ stop = add_buf(q->first_to_check, count);
+
+ /*
+	 * No siga-sync here; either a PCI interrupt or the thin-interrupt
+	 * handler will sync the queues.
+ */
+
+ /* need to set count to 1 for non-qebsm */
+ if (!is_qebsm(q))
+ count = 1;
+
+check_next:
+ if (q->first_to_check == stop)
+ goto out;
+
+ count = get_buf_states(q, q->first_to_check, &state, count);
+ if (!count)
+ goto out;
+
+ switch (state) {
+ case SLSB_P_INPUT_PRIMED:
+ QDIO_DBF_TEXT5(0, trace, "inptprim");
+
+ /*
+ * Only ACK the first buffer. The ACK will be removed in
+ * qdio_stop_polling.
+ */
+ if (q->u.in.polling)
+ state = SLSB_P_INPUT_NOT_INIT;
+ else {
+ q->u.in.polling = 1;
+ state = SLSB_P_INPUT_ACK;
+ }
+ set_buf_state(q, q->first_to_check, state);
+
+ /*
+ * Need to change all PRIMED buffers to NOT_INIT, otherwise
+		 * we're losing initiative in the thinint code.
+ */
+ if (count > 1)
+ set_buf_states(q, next_buf(q->first_to_check),
+ SLSB_P_INPUT_NOT_INIT, count - 1);
+
+ /*
+ * No siga-sync needed for non-qebsm here, as the inbound queue
+		 * will be synced on the next siga-r, or
+ * tiqdio_is_inbound_q_done will do the siga-sync.
+ */
+ q->first_to_check = add_buf(q->first_to_check, count);
+ atomic_sub(count, &q->nr_buf_used);
+ goto check_next;
+ case SLSB_P_INPUT_ERROR:
+ announce_buffer_error(q);
+ /* process the buffer, the upper layer will take care of it */
+ q->first_to_check = add_buf(q->first_to_check, count);
+ atomic_sub(count, &q->nr_buf_used);
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_ACK:
+ QDIO_DBF_TEXT5(0, trace, "inpnipro");
+ break;
+ default:
+ BUG();
+ }
+out:
+ QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
+ return q->first_to_check;
+}
+
+int qdio_inbound_q_moved(struct qdio_q *q)
+{
+ int bufnr;
+
+ bufnr = get_inbound_buffer_frontier(q);
+
+ if ((bufnr != q->last_move_ftc) || q->qdio_error) {
+ if (!need_siga_sync(q) && !pci_out_supported(q))
+ q->u.in.timestamp = get_usecs();
+
+ QDIO_DBF_TEXT4(0, trace, "inhasmvd");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ return 1;
+ } else
+ return 0;
+}
+
+static int qdio_inbound_q_done(struct qdio_q *q)
+{
+ unsigned char state;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15];
+#endif
+
+ if (!atomic_read(&q->nr_buf_used))
+ return 1;
+
+ /*
+	 * We need this siga-sync for synchronization with the adapter, as it
+ * does a kind of PCI avoidance.
+ */
+ qdio_siga_sync_q(q);
+
+ get_buf_state(q, q->first_to_check, &state);
+ if (state == SLSB_P_INPUT_PRIMED)
+ /* we got something to do */
+ return 0;
+
+ /* on VM, we don't poll, so the q is always done here */
+ if (need_siga_sync(q) || pci_out_supported(q))
+ return 1;
+
+ /*
+	 * At this point we know that inbound first_to_check
+ * has (probably) not moved (see qdio_inbound_processing).
+ */
+ if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT4(0, trace, "inqisdon");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ sprintf(dbf_text, "pf%02x", q->first_to_check);
+ QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ return 1;
+ } else {
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT4(0, trace, "inqisntd");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ sprintf(dbf_text, "pf%02x", q->first_to_check);
+ QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ return 0;
+ }
+}
+
+void qdio_kick_inbound_handler(struct qdio_q *q)
+{
+ int count, start, end;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15];
+#endif
+
+ qdio_perf_stat_inc(&perf_stats.inbound_handler);
+
+ start = q->first_to_kick;
+ end = q->first_to_check;
+ if (end >= start)
+ count = end - start;
+ else
+ count = end + QDIO_MAX_BUFFERS_PER_Q - start;
+
+#ifdef CONFIG_QDIO_DEBUG
+ sprintf(dbf_text, "s=%2xc=%2x", start, count);
+ QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return;
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
+ start, count, q->irq_ptr->int_parm);
+
+ /* for the next time */
+ q->first_to_kick = q->first_to_check;
+ q->qdio_error = 0;
+}
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+again:
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_inbound_handler(q);
+
+ if (!qdio_inbound_q_done(q))
+ /* means poll time is not yet over */
+ goto again;
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q))
+ goto again;
+}
+
+/* inbound tasklet */
+void qdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_inbound_processing(q);
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q)
+{
+ int count, stop;
+ unsigned char state;
+
+ if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
+ (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
+ qdio_siga_sync_q(q);
+
+ /*
+	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
+ * would return 0.
+ */
+ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ stop = add_buf(q->first_to_check, count);
+
+ /* need to set count to 1 for non-qebsm */
+ if (!is_qebsm(q))
+ count = 1;
+
+check_next:
+ if (q->first_to_check == stop)
+ return q->first_to_check;
+
+ count = get_buf_states(q, q->first_to_check, &state, count);
+ if (!count)
+ return q->first_to_check;
+
+ switch (state) {
+ case SLSB_P_OUTPUT_EMPTY:
+ /* the adapter got it */
+ QDIO_DBF_TEXT5(0, trace, "outpempt");
+
+ atomic_sub(count, &q->nr_buf_used);
+ q->first_to_check = add_buf(q->first_to_check, count);
+ /*
+ * We fetch all buffer states at once. get_buf_states may
+ * return count < stop. For QEBSM we do not loop.
+ */
+ if (is_qebsm(q))
+ break;
+ goto check_next;
+ case SLSB_P_OUTPUT_ERROR:
+ announce_buffer_error(q);
+ /* process the buffer, the upper layer will take care of it */
+ q->first_to_check = add_buf(q->first_to_check, count);
+ atomic_sub(count, &q->nr_buf_used);
+ break;
+ case SLSB_CU_OUTPUT_PRIMED:
+ /* the adapter has not fetched the output yet */
+ QDIO_DBF_TEXT5(0, trace, "outpprim");
+ break;
+ case SLSB_P_OUTPUT_NOT_INIT:
+ case SLSB_P_OUTPUT_HALTED:
+ break;
+ default:
+ BUG();
+ }
+ return q->first_to_check;
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+ return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q)
+{
+ int bufnr;
+
+ bufnr = get_outbound_buffer_frontier(q);
+
+ if ((bufnr != q->last_move_ftc) || q->qdio_error) {
+ q->last_move_ftc = bufnr;
+ QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ return 1;
+ } else
+ return 0;
+}
+
+/*
+ * VM may present us with cc=2 and the busy bit set on SIGA-write
+ * while a Guest LAN is being reconfigured (only in iqdio mode;
+ * otherwise qdio is asynchronous and cc=2 with busy bit would take
+ * the queues down immediately).
+ *
+ * If such a condition occurs, qdio_siga_output therefore retries
+ * constantly for a short time. If the condition persists, it
+ * increases busy_siga_counter, saves the timestamp and schedules
+ * the queue for later processing. qdio_outbound_processing checks
+ * the counter and, if it is non-zero, calls qdio_kick_outbound_q
+ * once per count, attempting further SIGA instructions. For each
+ * successful SIGA the counter is decreased; for failing SIGAs it
+ * stays the same. After some time of no movement, qdio_kick_outbound_q
+ * finally fails and reports the corresponding error codes so that
+ * the upper layer module takes the queues down.
+ *
+ * Note that this is a change from the original HiperSockets design
+ * (which said cc=2 with busy bit means take the queues down), but
+ * back then Guest LAN did not exist... excessive cc=2/busy-bit
+ * conditions will still take the queues down, only with a higher
+ * threshold to accommodate the Guest LAN environment.
+ *
+ * Called from the outbound tasklet and the do_QDIO handler.
+ */
+static void qdio_kick_outbound_q(struct qdio_q *q)
+{
+ int rc;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15];
+
+ QDIO_DBF_TEXT5(0, trace, "kickoutq");
+ QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
+#endif /* CONFIG_QDIO_DEBUG */
+
+ if (!need_siga_out(q))
+ return;
+
+ rc = qdio_siga_output(q);
+ switch (rc) {
+ case 0:
+		/* went smoothly this time, reset the timestamp */
+ q->u.out.timestamp = 0;
+
+ /* TODO: improve error handling for CC=0 case */
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT3(0, trace, "cc2reslv");
+ sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
+ atomic_read(&q->u.out.busy_siga_counter));
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ break;
+ /* cc=2 and busy bit */
+ case (2 | QDIO_ERROR_SIGA_BUSY):
+ atomic_inc(&q->u.out.busy_siga_counter);
+
+ /* if the last siga was successful, save timestamp here */
+ if (!q->u.out.timestamp)
+ q->u.out.timestamp = get_usecs();
+
+ /* if we're in time, don't touch qdio_error */
+ if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
+ tasklet_schedule(&q->tasklet);
+ break;
+ }
+ QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
+#ifdef CONFIG_QDIO_DEBUG
+ sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
+ atomic_read(&q->u.out.busy_siga_counter));
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ default:
+ /* for plain cc=1, 2 or 3 */
+ q->qdio_error = rc;
+ }
+}
+
+static void qdio_kick_outbound_handler(struct qdio_q *q)
+{
+ int start, end, count;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15];
+#endif
+
+ start = q->first_to_kick;
+ end = q->last_move_ftc;
+ if (end >= start)
+ count = end - start;
+ else
+ count = end + QDIO_MAX_BUFFERS_PER_Q - start;
+
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT4(0, trace, "kickouth");
+ QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+
+ sprintf(dbf_text, "s=%2xc=%2x", start, count);
+ QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return;
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+ q->irq_ptr->int_parm);
+
+ /* for the next time: */
+ q->first_to_kick = q->last_move_ftc;
+ q->qdio_error = 0;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+ int siga_attempts;
+
+ qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+
+ /* see comment in qdio_kick_outbound_q */
+ siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
+ while (siga_attempts--) {
+ atomic_dec(&q->u.out.busy_siga_counter);
+ qdio_kick_outbound_q(q);
+ }
+
+ BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+
+ if (qdio_outbound_q_moved(q))
+ qdio_kick_outbound_handler(q);
+
+ if (queue_type(q) == QDIO_ZFCP_QFMT) {
+ if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+
+ /* bail out for HiperSockets unicast queues */
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
+ return;
+
+ if (q->u.out.pci_out_enabled)
+ return;
+
+ /*
+ * Now we know that queue type is either qeth without pci enabled
+ * or HiperSockets multicast. Make sure buffer switch from PRIMED to
+ * EMPTY is noticed and outbound_handler is called after some time.
+ */
+ if (qdio_outbound_q_done(q))
+ del_timer(&q->u.out.timer);
+ else {
+ if (!timer_pending(&q->u.out.timer)) {
+ mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
+ qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
+ }
+ }
+}
+
+/* outbound tasklet */
+void qdio_outbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_outbound_processing(q);
+}
+
+void qdio_outbound_timer(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ tasklet_schedule(&q->tasklet);
+}
+
+/* called from thinint inbound tasklet */
+void qdio_check_outbound_after_thinint(struct qdio_q *q)
+{
+ struct qdio_q *out;
+ int i;
+
+ if (!pci_out_supported(q))
+ return;
+
+ for_each_output_queue(q->irq_ptr, out, i)
+ if (!qdio_outbound_q_done(out))
+ tasklet_schedule(&out->tasklet);
+}
+
+static inline void qdio_set_state(struct qdio_irq *irq_ptr,
+ enum qdio_irq_states state)
+{
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15];
+
+ QDIO_DBF_TEXT5(0, trace, "newstate");
+ sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
+ QDIO_DBF_TEXT5(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+ irq_ptr->state = state;
+ mb();
+}
+
+static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
+{
+ char dbf_text[15];
+
+ if (irb->esw.esw0.erw.cons) {
+ sprintf(dbf_text, "sens%4x", schid.sch_no);
+ QDIO_DBF_TEXT2(1, trace, dbf_text);
+ QDIO_DBF_HEX0(0, trace, irb, 64);
+ QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
+ }
+}
+
+/* PCI interrupt handler */
+static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
+{
+ int i;
+ struct qdio_q *q;
+
+ qdio_perf_stat_inc(&perf_stats.pci_int);
+
+ for_each_input_queue(irq_ptr, q, i)
+ tasklet_schedule(&q->tasklet);
+
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (qdio_outbound_q_done(q))
+ continue;
+
+ if (!siga_syncs_out_pci(q))
+ qdio_siga_sync_q(q);
+
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+static void qdio_handle_activate_check(struct ccw_device *cdev,
+ unsigned long intparm, int cstat, int dstat)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+ char dbf_text[15];
+
+ QDIO_DBF_TEXT2(1, trace, "ick2");
+ sprintf(dbf_text, "%s", cdev->dev.bus_id);
+ QDIO_DBF_TEXT2(1, trace, dbf_text);
+ QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
+ QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
+ QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
+
+ if (irq_ptr->nr_input_qs) {
+ q = irq_ptr->input_qs[0];
+ } else if (irq_ptr->nr_output_qs) {
+ q = irq_ptr->output_qs[0];
+ } else {
+ dump_stack();
+ goto no_handler;
+ }
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+ 0, -1, -1, irq_ptr->int_parm);
+no_handler:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+}
+
+static void qdio_call_shutdown(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ put_device(&cdev->dev);
+}
+
+static void qdio_int_error(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_INACTIVE:
+ case QDIO_IRQ_STATE_CLEANUP:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ break;
+ case QDIO_IRQ_STATE_ESTABLISHED:
+ case QDIO_IRQ_STATE_ACTIVE:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+ if (get_device(&cdev->dev)) {
+ /* Can't call shutdown from interrupt context. */
+ PREPARE_WORK(&cdev->private->kick_work,
+ qdio_call_shutdown);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
+ int dstat)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
+ QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
+ goto error;
+ }
+
+ if (!(dstat & DEV_STAT_DEV_END)) {
+ QDIO_DBF_TEXT2(1, setup, "eq:no de");
+ goto error;
+ }
+
+ if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
+ QDIO_DBF_TEXT2(1, setup, "eq:badio");
+ goto error;
+ }
+ return 0;
+error:
+ QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
+ QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return 1;
+}
+
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+ int dstat)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ char dbf_text[15];
+
+ sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+ if (!qdio_establish_check_errors(cdev, cstat, dstat))
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ int cstat, dstat;
+ char dbf_text[15];
+
+ qdio_perf_stat_inc(&perf_stats.qdio_int);
+
+ if (!intparm || !irq_ptr) {
+ sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ return;
+ }
+
+ if (IS_ERR(irb)) {
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ sprintf(dbf_text, "ierr%4x",
+ cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ qdio_int_error(cdev);
+ return;
+ case -ETIMEDOUT:
+ sprintf(dbf_text, "qtoh%4x",
+ cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ qdio_int_error(cdev);
+ return;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ }
+ qdio_irq_check_sense(irq_ptr->schid, irb);
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_INACTIVE:
+ qdio_establish_handle_irq(cdev, cstat, dstat);
+ break;
+
+ case QDIO_IRQ_STATE_CLEANUP:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ break;
+
+ case QDIO_IRQ_STATE_ESTABLISHED:
+ case QDIO_IRQ_STATE_ACTIVE:
+ if (cstat & SCHN_STAT_PCI) {
+ qdio_int_handler_pci(irq_ptr);
+ /* no state change so no need to wake up wait_q */
+ return;
+ }
+ if ((cstat & ~SCHN_STAT_PCI) || dstat) {
+ qdio_handle_activate_check(cdev, intparm, cstat,
+ dstat);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ *
+ * Returns a pointer to the saved qdio subchannel description,
+ * or NULL if the device is not set up for qdio.
+ */
+struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr;
+
+ QDIO_DBF_TEXT0(0, setup, "getssqd");
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return NULL;
+
+ return &irq_ptr->ssqd_desc;
+}
+EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
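A minimal caller-side sketch, mirroring the qeth_core_main.c hunk further down in this patch; cdev, ssqd and max_ports are placeholder names:

	struct qdio_ssqd_desc *ssqd;
	int max_ports = 0;

	ssqd = qdio_get_ssqd_desc(cdev);
	if (ssqd)
		max_ports = ssqd->pcnt;	/* port count, as qeth_core_hardsetup_card uses it below */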
+
+/**
+ * qdio_cleanup - shutdown queues and free data structures
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shut down
+ *
+ * This function calls qdio_shutdown() for @cdev with method @how
+ * and on success qdio_free() for @cdev.
+ */
+int qdio_cleanup(struct ccw_device *cdev, int how)
+{
+ struct qdio_irq *irq_ptr;
+ char dbf_text[15];
+ int rc;
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT1(0, trace, dbf_text);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ rc = qdio_shutdown(cdev, how);
+ if (rc == 0)
+ rc = qdio_free(cdev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_cleanup);
+
+static void qdio_shutdown_queues(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ tasklet_disable(&q->tasklet);
+
+ for_each_output_queue(irq_ptr, q, i) {
+ tasklet_disable(&q->tasklet);
+ del_timer(&q->u.out.timer);
+ }
+}
+
+/**
+ * qdio_shutdown - shut down a qdio subchannel
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shut down
+ */
+int qdio_shutdown(struct ccw_device *cdev, int how)
+{
+ struct qdio_irq *irq_ptr;
+ int rc;
+ unsigned long flags;
+ char dbf_text[15];
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ /*
+ * Subchannel was already shot down. We cannot prevent being called
+ * twice since cio may trigger a shutdown asynchronously.
+ */
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return 0;
+ }
+
+ sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT1(0, trace, dbf_text);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ tiqdio_remove_input_queues(irq_ptr);
+ qdio_shutdown_queues(cdev);
+ qdio_shutdown_debug_entries(irq_ptr, cdev);
+
+ /* cleanup subchannel */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+ else
+ /* default behaviour is halt */
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ if (rc) {
+ sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ sprintf(dbf_text, "rc=%d", rc);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ goto no_cleanup;
+ }
+
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR,
+ 10 * HZ);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+no_cleanup:
+ qdio_shutdown_thinint(irq_ptr);
+
+ /* restore interrupt handler */
+ if ((void *)cdev->handler == (void *)qdio_int_handler)
+ cdev->handler = irq_ptr->orig_handler;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ module_put(THIS_MODULE);
+ if (rc)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_shutdown);
+
+/**
+ * qdio_free - free data structures for a qdio subchannel
+ * @cdev: associated ccw device
+ */
+int qdio_free(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr;
+ char dbf_text[15];
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+
+ sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT1(0, trace, dbf_text);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ cdev->private->qdio_data = NULL;
+ mutex_unlock(&irq_ptr->setup_mutex);
+
+ qdio_release_memory(irq_ptr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_free);
+
+/**
+ * qdio_initialize - allocate and establish queues for a qdio subchannel
+ * @init_data: initialization data
+ *
+ * This function first allocates queues via qdio_allocate() and on success
+ * establishes them via qdio_establish().
+ */
+int qdio_initialize(struct qdio_initialize *init_data)
+{
+ int rc;
+ char dbf_text[15];
+
+ sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+ rc = qdio_allocate(init_data);
+ if (rc)
+ return rc;
+
+ rc = qdio_establish(init_data);
+ if (rc)
+ qdio_free(init_data->cdev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_initialize);
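A rough caller-side sketch; the field names are the ones read by qdio_allocate()/qdio_setup_irq() in this patch, while my_input_handler, my_output_handler, my_card, in_sbal_ptrs and out_sbal_ptrs are hypothetical placeholders:

	struct qdio_initialize init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cdev = cdev;
	init_data.q_format = QDIO_QETH_QFMT;	/* or QDIO_ZFCP_QFMT / QDIO_IQDIO_QFMT */
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = my_input_handler;	/* qdio_handler_t callbacks */
	init_data.output_handler = my_output_handler;
	init_data.int_parm = (unsigned long)my_card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;	/* arrays of SBAL pointers */
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	rc = qdio_initialize(&init_data);
	if (rc)
		return rc;	/* queues were already freed on the error path */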
+
+/**
+ * qdio_allocate - allocate qdio queues and associated data
+ * @init_data: initialization data
+ */
+int qdio_allocate(struct qdio_initialize *init_data)
+{
+ struct qdio_irq *irq_ptr;
+ char dbf_text[15];
+
+ sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
+ (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
+ return -EINVAL;
+
+ if ((!init_data->input_sbal_addr_array) ||
+ (!init_data->output_sbal_addr_array))
+ return -EINVAL;
+
+ qdio_allocate_do_dbf(init_data);
+
+ /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
+ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr)
+ goto out_err;
+ QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
+ QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
+
+ mutex_init(&irq_ptr->setup_mutex);
+
+ /*
+ * Allocate a page for the chsc calls in qdio_establish.
+ * Must be pre-allocated since a zfcp recovery will call
+ * qdio_establish. In case of low memory and swap on a zfcp disk
+ * we may not be able to allocate memory otherwise.
+ */
+ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
+ if (!irq_ptr->chsc_page)
+ goto out_rel;
+
+ /* qdr is used in ccw1.cda which is u32 */
+ irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr->qdr)
+ goto out_rel;
+ WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
+
+ QDIO_DBF_TEXT0(0, setup, "qdr:");
+ QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
+
+ if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
+ init_data->no_output_qs))
+ goto out_rel;
+
+ init_data->cdev->private->qdio_data = irq_ptr;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ return 0;
+out_rel:
+ qdio_release_memory(irq_ptr);
+out_err:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate);
+
+/**
+ * qdio_establish - establish queues on a qdio subchannel
+ * @init_data: initialization data
+ */
+int qdio_establish(struct qdio_initialize *init_data)
+{
+ char dbf_text[20];
+ struct qdio_irq *irq_ptr;
+ struct ccw_device *cdev = init_data->cdev;
+ unsigned long saveflags;
+ int rc;
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ qdio_setup_irq(init_data);
+
+ rc = qdio_establish_thinint(irq_ptr);
+ if (rc) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return rc;
+ }
+
+ /* establish q */
+ irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->equeue.count;
+ irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ ccw_device_set_options_mask(cdev, 0);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ if (rc) {
+ sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ sprintf(dbf_text, "eq:rc%4x", rc);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+ if (rc) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return rc;
+ }
+
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return -EIO;
+ }
+
+ qdio_setup_ssqd_info(irq_ptr);
+ sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+
+ /* qebsm is now setup if available, initialize buffer states */
+ qdio_init_buf_states(irq_ptr);
+
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_print_subchannel_info(irq_ptr, cdev);
+ qdio_setup_debug_entries(irq_ptr, cdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_establish);
+
+/**
+ * qdio_activate - activate queues on a qdio subchannel
+ * @cdev: associated cdev
+ */
+int qdio_activate(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr;
+ int rc;
+ unsigned long saveflags;
+ char dbf_text[20];
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+ QDIO_DBF_TEXT2(0, trace, dbf_text);
+
+ irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->aqueue.count;
+ irq_ptr->ccw.cda = 0;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+ 0, DOIO_DENY_PREFETCH);
+ if (rc) {
+ sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ sprintf(dbf_text, "aq:rc%4x", rc);
+ QDIO_DBF_TEXT2(1, setup, dbf_text);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+ if (rc)
+ goto out;
+
+ if (is_thinint_irq(irq_ptr))
+ tiqdio_add_input_queues(irq_ptr);
+
+ /* wait for subchannel to become active */
+ msleep(5);
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_STOPPED:
+ case QDIO_IRQ_STATE_ERR:
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return -EIO;
+ default:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+ rc = 0;
+ }
+out:
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_activate);
+
+static inline int buf_in_between(int bufnr, int start, int count)
+{
+ int end = add_buf(start, count);
+
+ if (end > start) {
+ if (bufnr >= start && bufnr < end)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* wrap-around case */
+ if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
+ (bufnr < end))
+ return 1;
+ else
+ return 0;
+}
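To make the wrap-around branch concrete, a worked example assuming the usual ring size of 128 buffers (QDIO_MAX_BUFFERS_PER_Q) and an add_buf() that wraps modulo that size:

	/*
	 * start = 120, count = 16  =>  end = add_buf(120, 16) = 8 (wrapped)
	 *
	 * buf_in_between(125, 120, 16) -> 1	(tail of the ring)
	 * buf_in_between(  2, 120, 16) -> 1	(wrapped head)
	 * buf_in_between( 50, 120, 16) -> 0	(outside the range)
	 */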
+
+/**
+ * handle_inbound - reset processed input buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are emptied
+ */
+static void handle_inbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+{
+ unsigned long flags;
+ int used, rc;
+
+ /*
+ * do_QDIO could run in parallel with the queue tasklet so the
+ * upper-layer program could empty the ACK'ed buffer here.
+ * If that happens we must clear the polling flag, otherwise
+ * qdio_stop_polling() could set the buffer to NOT_INIT after
+ * it was set to EMPTY which would kill us.
+ */
+ spin_lock_irqsave(&q->u.in.lock, flags);
+ if (q->u.in.polling)
+ if (buf_in_between(q->last_move_ftc, bufnr, count))
+ q->u.in.polling = 0;
+
+ count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+ spin_unlock_irqrestore(&q->u.in.lock, flags);
+
+ used = atomic_add_return(count, &q->nr_buf_used) - count;
+ BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
+
+ /* no need to signal as long as the adapter had free buffers */
+ if (used)
+ return;
+
+ if (need_siga_in(q)) {
+ rc = qdio_siga_input(q);
+ if (rc)
+ q->qdio_error = rc;
+ }
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static void handle_outbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+{
+ unsigned char state;
+ int used;
+
+ qdio_perf_stat_inc(&perf_stats.outbound_handler);
+
+ count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+ used = atomic_add_return(count, &q->nr_buf_used);
+ BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
+
+ if (callflags & QDIO_FLAG_PCI_OUT)
+ q->u.out.pci_out_enabled = 1;
+ else
+ q->u.out.pci_out_enabled = 0;
+
+ if (queue_type(q) == QDIO_IQDIO_QFMT) {
+ if (multicast_outbound(q))
+ qdio_kick_outbound_q(q);
+ else
+ /*
+ * One siga-w per buffer required for unicast
+ * HiperSockets.
+ */
+ while (count--)
+ qdio_kick_outbound_q(q);
+ goto out;
+ }
+
+ if (need_siga_sync(q)) {
+ qdio_siga_sync_q(q);
+ goto out;
+ }
+
+ /* try to fast requeue buffers */
+ get_buf_state(q, prev_buf(bufnr), &state);
+ if (state != SLSB_CU_OUTPUT_PRIMED)
+ qdio_kick_outbound_q(q);
+ else {
+ QDIO_DBF_TEXT5(0, trace, "fast-req");
+ qdio_perf_stat_inc(&perf_stats.fast_requeue);
+ }
+out:
+ /* Fixme: could wait forever if called from process context */
+ tasklet_schedule(&q->tasklet);
+}
+
+/**
+ * do_QDIO - process input or output buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @callflags: input or output and special flags from the program
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+ int q_nr, int bufnr, int count)
+{
+ struct qdio_irq *irq_ptr;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[20];
+
+ sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+ if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
+ (count > QDIO_MAX_BUFFERS_PER_Q) ||
+ (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+#ifdef CONFIG_QDIO_DEBUG
+ if (callflags & QDIO_FLAG_SYNC_INPUT)
+ QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
+ sizeof(void *));
+ else
+ QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
+ sizeof(void *));
+
+ sprintf(dbf_text, "flag%04x", callflags);
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+ sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+ return -EBUSY;
+
+ if (callflags & QDIO_FLAG_SYNC_INPUT)
+ handle_inbound(irq_ptr->input_qs[q_nr],
+ callflags, bufnr, count);
+ else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+ handle_outbound(irq_ptr->output_qs[q_nr],
+ callflags, bufnr, count);
+ else {
+ QDIO_DBF_TEXT3(1, trace, "doQD:inv");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(do_QDIO);
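Two illustrative calls, modelled on the qeth conversions further down in this patch; cdev, q_nr, index and rc are placeholders:

	/* hand 16 emptied buffers, starting at buffer 0, back to input queue 0 */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);

	/* announce one filled buffer on output queue q_nr and request a PCI */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
		     q_nr, index, 1);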
+
+static int __init init_QDIO(void)
+{
+ int rc;
+
+ rc = qdio_setup_init();
+ if (rc)
+ return rc;
+ rc = tiqdio_allocate_memory();
+ if (rc)
+ goto out_cache;
+ rc = qdio_debug_init();
+ if (rc)
+ goto out_ti;
+ rc = qdio_setup_perf_stats();
+ if (rc)
+ goto out_debug;
+ rc = tiqdio_register_thinints();
+ if (rc)
+ goto out_perf;
+ return 0;
+
+out_perf:
+ qdio_remove_perf_stats();
+out_debug:
+ qdio_debug_exit();
+out_ti:
+ tiqdio_free_memory();
+out_cache:
+ qdio_setup_exit();
+ return rc;
+}
+
+static void __exit exit_QDIO(void)
+{
+ tiqdio_unregister_thinints();
+ tiqdio_free_memory();
+ qdio_remove_perf_stats();
+ qdio_debug_exit();
+ qdio_setup_exit();
+}
+
+module_init(init_QDIO);
+module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
new file mode 100644
index 000000000000..ea01b85b1cc9
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.c
@@ -0,0 +1,151 @@
+/*
+ * drivers/s390/cio/qdio_perf.c
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/ccwdev.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio_debug.h"
+#include "qdio_perf.h"
+
+int qdio_performance_stats;
+struct qdio_perf_stats perf_stats;
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *qdio_perf_pde;
+#endif
+
+inline void qdio_perf_stat_inc(atomic_long_t *count)
+{
+ if (qdio_performance_stats)
+ atomic_long_inc(count);
+}
+
+inline void qdio_perf_stat_dec(atomic_long_t *count)
+{
+ if (qdio_performance_stats)
+ atomic_long_dec(count);
+}
+
+/*
+ * procfs functions
+ */
+static int qdio_perf_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.qdio_int));
+ seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.pci_int));
+ seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.thin_int));
+ seq_printf(m, "\n");
+ seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.tasklet_inbound));
+ seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.tasklet_outbound));
+ seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
+ (long)atomic_long_read(&perf_stats.tasklet_thinint),
+ (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
+ seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
+ (long)atomic_long_read(&perf_stats.thinint_inbound),
+ (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
+ seq_printf(m, "\n");
+ seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.siga_in));
+ seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.siga_out));
+ seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.siga_sync));
+ seq_printf(m, "\n");
+ seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.inbound_handler));
+ seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.outbound_handler));
+ seq_printf(m, "\n");
+ seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
+ (long)atomic_long_read(&perf_stats.fast_requeue));
+ seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
+ (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
+ seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.debug_stop_polling));
+ seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
+ (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
+ seq_printf(m, "\n");
+ return 0;
+}
+static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qdio_perf_proc_show, NULL);
+}
+
+static struct file_operations qdio_perf_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = qdio_perf_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * sysfs functions
+ */
+static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
+}
+
+static ssize_t qdio_perf_stats_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long i;
+
+ if (strict_strtoul(buf, 16, &i) != 0)
+ return -EINVAL;
+ if ((i != 0) && (i != 1))
+ return -EINVAL;
+ if (i == qdio_performance_stats)
+ return count;
+
+ qdio_performance_stats = i;
+ /* reset performance statistics */
+ if (i == 0)
+ memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+ return count;
+}
+
+static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
+ qdio_perf_stats_store);
+
+int __init qdio_setup_perf_stats(void)
+{
+ int rc;
+
+ rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_PROC_FS
+ memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+ qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
+ NULL, &qdio_perf_proc_fops);
+#endif
+ return 0;
+}
+
+void __exit qdio_remove_perf_stats(void)
+{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("qdio_perf", NULL);
+#endif
+ bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
+}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
new file mode 100644
index 000000000000..5c406a8b7387
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.h
@@ -0,0 +1,54 @@
+/*
+ * drivers/s390/cio/qdio_perf.h
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_PERF_H
+#define QDIO_PERF_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+struct qdio_perf_stats {
+ /* interrupt handler calls */
+ atomic_long_t qdio_int;
+ atomic_long_t pci_int;
+ atomic_long_t thin_int;
+
+ /* tasklet runs */
+ atomic_long_t tasklet_inbound;
+ atomic_long_t tasklet_outbound;
+ atomic_long_t tasklet_thinint;
+ atomic_long_t tasklet_thinint_loop;
+ atomic_long_t thinint_inbound;
+ atomic_long_t thinint_inbound_loop;
+ atomic_long_t thinint_inbound_loop2;
+
+ /* signal adapter calls */
+ atomic_long_t siga_out;
+ atomic_long_t siga_in;
+ atomic_long_t siga_sync;
+
+ /* misc */
+ atomic_long_t inbound_handler;
+ atomic_long_t outbound_handler;
+ atomic_long_t fast_requeue;
+
+ /* for debugging */
+ atomic_long_t debug_tl_out_timer;
+ atomic_long_t debug_stop_polling;
+};
+
+extern struct qdio_perf_stats perf_stats;
+extern int qdio_performance_stats;
+
+int qdio_setup_perf_stats(void);
+void qdio_remove_perf_stats(void);
+
+extern void qdio_perf_stat_inc(atomic_long_t *count);
+extern void qdio_perf_stat_dec(atomic_long_t *count);
+
+#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000000..f0923a8aceda
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,521 @@
+/*
+ * drivers/s390/cio/qdio_setup.c
+ *
+ * qdio queue initialization
+ *
+ * Copyright (C) IBM Corp. 2008
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+static struct kmem_cache *qdio_q_cache;
+
+/*
+ * qebsm is only available under 64bit but the adapter sets the feature
+ * flag anyway, so we manually override it.
+ */
+static inline int qebsm_possible(void)
+{
+#ifdef CONFIG_64BIT
+ return css_general_characteristics.qebsm;
+#endif
+ return 0;
+}
+
+/*
+ * qib_param_field: pointer to 128 bytes, or NULL if there is no param field
+ * input/output_slib_elements: pointer to nr_queues * 128 words of data, or NULL
+ */
+static void set_impl_params(struct qdio_irq *irq_ptr,
+ unsigned int qib_param_field_format,
+ unsigned char *qib_param_field,
+ unsigned long *input_slib_elements,
+ unsigned long *output_slib_elements)
+{
+ struct qdio_q *q;
+ int i, j;
+
+ if (!irq_ptr)
+ return;
+
+ WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
+ irq_ptr->qib.pfmt = qib_param_field_format;
+ if (qib_param_field)
+ memcpy(irq_ptr->qib.parm, qib_param_field,
+ QDIO_MAX_BUFFERS_PER_Q);
+
+ if (!input_slib_elements)
+ goto output;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+output:
+ if (!output_slib_elements)
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+}
+
+static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+{
+ struct qdio_q *q;
+ int i;
+
+ for (i = 0; i < nr_queues; i++) {
+ q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ WARN_ON((unsigned long)q & 0xff);
+
+ q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
+ if (!q->slib) {
+ kmem_cache_free(qdio_q_cache, q);
+ return -ENOMEM;
+ }
+ WARN_ON((unsigned long)q->slib & 0x7ff);
+ irq_ptr_qs[i] = q;
+ }
+ return 0;
+}
+
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
+{
+ int rc;
+
+ rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
+ if (rc)
+ return rc;
+ rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
+ return rc;
+}
+
+static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ qdio_handler_t *handler, int i)
+{
+ /* must be cleared by every qdio_establish */
+ memset(q, 0, ((char *)&q->slib) - ((char *)q));
+ memset(q->slib, 0, PAGE_SIZE);
+
+ q->irq_ptr = irq_ptr;
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
+}
+
+static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ void **sbals_array, char *dbf_text, int i)
+{
+ struct qdio_q *prev;
+ int j;
+
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));
+
+ q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
+
+ /* fill in sbal */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+ q->sbal[j] = *sbals_array++;
+ WARN_ON((unsigned long)q->sbal[j] & 0xff);
+ }
+
+ /* fill in slib */
+ if (i > 0) {
+ prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
+ : irq_ptr->output_qs[i - 1];
+ prev->slib->nsliba = (unsigned long)q->slib;
+ }
+
+ q->slib->sla = (unsigned long)q->sl;
+ q->slib->slsba = (unsigned long)&q->slsb.val[0];
+
+ /* fill in sl */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->sl->element[j].sbal = (unsigned long)q->sbal[j];
+
+ QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
+ QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
+ QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
+ QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
+}
+
+static void setup_queues(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ char dbf_text[20];
+ struct qdio_q *q;
+ void **input_sbal_array = qdio_init->input_sbal_addr_array;
+ void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ int i;
+
+ sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ for_each_input_queue(irq_ptr, q, i) {
+ sprintf(dbf_text, "in-q%4x", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
+
+ q->is_input_q = 1;
+ spin_lock_init(&q->u.in.lock);
+ setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
+ input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ if (is_thinint_irq(irq_ptr))
+ tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+ (unsigned long) q);
+ else
+ tasklet_init(&q->tasklet, qdio_inbound_processing,
+ (unsigned long) q);
+ }
+
+ for_each_output_queue(irq_ptr, q, i) {
+ sprintf(dbf_text, "outq%4x", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+
+ q->is_input_q = 0;
+ setup_storage_lists(q, irq_ptr, output_sbal_array,
+ dbf_text, i);
+ output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ tasklet_init(&q->tasklet, qdio_outbound_processing,
+ (unsigned long) q);
+ setup_timer(&q->u.out.timer, (void(*)(unsigned long))
+ &qdio_outbound_timer, (unsigned long)q);
+ }
+}
+
+static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
+{
+ if (qdioac & AC1_SIGA_INPUT_NEEDED)
+ irq_ptr->siga_flag.input = 1;
+ if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
+ irq_ptr->siga_flag.output = 1;
+ if (qdioac & AC1_SIGA_SYNC_NEEDED)
+ irq_ptr->siga_flag.sync = 1;
+ if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
+ irq_ptr->siga_flag.no_sync_ti = 1;
+ if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
+ irq_ptr->siga_flag.no_sync_out_pci = 1;
+
+ if (irq_ptr->siga_flag.no_sync_out_pci &&
+ irq_ptr->siga_flag.no_sync_ti)
+ irq_ptr->siga_flag.no_sync_out_ti = 1;
+}
+
+static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
+ unsigned char qdioac, unsigned long token)
+{
+ char dbf_text[15];
+
+ if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
+ goto no_qebsm;
+ if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
+ (!(qdioac & AC1_SC_QEBSM_ENABLED)))
+ goto no_qebsm;
+
+ irq_ptr->sch_token = token;
+
+ QDIO_DBF_TEXT0(0, setup, "V=V:1");
+ sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ return;
+
+no_qebsm:
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ QDIO_DBF_TEXT0(0, setup, "noV=V");
+}
+
+static int __get_ssqd_info(struct qdio_irq *irq_ptr)
+{
+ struct chsc_ssqd_area *ssqd;
+ int rc;
+
+ QDIO_DBF_TEXT0(0, setup, "getssqd");
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ memset(ssqd, 0, PAGE_SIZE);
+
+ ssqd->request = (struct chsc_header) {
+ .length = 0x0010,
+ .code = 0x0024,
+ };
+ ssqd->first_sch = irq_ptr->schid.sch_no;
+ ssqd->last_sch = irq_ptr->schid.sch_no;
+ ssqd->ssid = irq_ptr->schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+ rc = chsc_error_from_response(ssqd->response.code);
+ if (rc)
+ return rc;
+
+ if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+ !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+ (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
+ return -EINVAL;
+
+ memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
+ sizeof(struct qdio_ssqd_desc));
+ return 0;
+}
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+ unsigned char qdioac;
+ char dbf_text[15];
+ int rc;
+
+ rc = __get_ssqd_info(irq_ptr);
+ if (rc) {
+ QDIO_DBF_TEXT2(0, setup, "ssqdasig");
+ sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+ sprintf(dbf_text, "rc:%d", rc);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+ /* all flags set, worst case */
+ qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+ AC1_SIGA_SYNC_NEEDED;
+ } else
+ qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+ check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+ process_ac_flags(irq_ptr, qdioac);
+
+ sprintf(dbf_text, "qdioac%2x", qdioac);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+}
+
+void qdio_release_memory(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ /*
+ * Must check queue array manually since irq_ptr->nr_input_qs /
+ * irq_ptr->nr_output_qs may not yet be set.
+ */
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->input_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->output_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ kfree(irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
+}
+
+static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_q **irq_ptr_qs,
+ int i, int nr)
+{
+ irq_ptr->qdr->qdf0[i + nr].sliba =
+ (unsigned long)irq_ptr_qs[i]->slib;
+
+ irq_ptr->qdr->qdf0[i + nr].sla =
+ (unsigned long)irq_ptr_qs[i]->sl;
+
+ irq_ptr->qdr->qdf0[i + nr].slsba =
+ (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
+
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ int i;
+
+ irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+ irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+ irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+ irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+
+ for (i = 0; i < qdio_init->no_input_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+
+ for (i = 0; i < qdio_init->no_output_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
+ qdio_init->no_input_qs);
+}
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *init_data)
+{
+ if (qebsm_possible())
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+ irq_ptr->qib.qfmt = init_data->q_format;
+ if (init_data->no_input_qs)
+ irq_ptr->qib.isliba =
+ (unsigned long)(irq_ptr->input_qs[0]->slib);
+ if (init_data->no_output_qs)
+ irq_ptr->qib.osliba =
+ (unsigned long)(irq_ptr->output_qs[0]->slib);
+ memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+}
+
+int qdio_setup_irq(struct qdio_initialize *init_data)
+{
+ struct ciw *ciw;
+ struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+ int rc;
+
+ memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+ /* wipes qib.ac, required by ar7063 */
+ memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+ irq_ptr->int_parm = init_data->int_parm;
+ irq_ptr->nr_input_qs = init_data->no_input_qs;
+ irq_ptr->nr_output_qs = init_data->no_output_qs;
+
+ irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
+ irq_ptr->cdev = init_data->cdev;
+ setup_queues(irq_ptr, init_data);
+
+ setup_qib(irq_ptr, init_data);
+ qdio_setup_thinint(irq_ptr);
+ set_impl_params(irq_ptr, init_data->qib_param_field_format,
+ init_data->qib_param_field,
+ init_data->input_slib_elements,
+ init_data->output_slib_elements);
+
+ /* fill input and output descriptors */
+ setup_qdr(irq_ptr, init_data);
+
+ /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+ /* get qdio commands */
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ if (!ciw) {
+ QDIO_DBF_TEXT2(1, setup, "no eq");
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->equeue = *ciw;
+
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ if (!ciw) {
+ QDIO_DBF_TEXT2(1, setup, "no aq");
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->aqueue = *ciw;
+
+ /* set new interrupt handler */
+ irq_ptr->orig_handler = init_data->cdev->handler;
+ init_data->cdev->handler = qdio_int_handler;
+ return 0;
+out_err:
+ qdio_release_memory(irq_ptr);
+ return rc;
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev)
+{
+ char s[80];
+
+ sprintf(s, "%s ", cdev->dev.bus_id);
+
+ switch (irq_ptr->qib.qfmt) {
+ case QDIO_QETH_QFMT:
+ sprintf(s + strlen(s), "OSADE ");
+ break;
+ case QDIO_ZFCP_QFMT:
+ sprintf(s + strlen(s), "ZFCP ");
+ break;
+ case QDIO_IQDIO_QFMT:
+ sprintf(s + strlen(s), "HiperSockets ");
+ break;
+ }
+ sprintf(s + strlen(s), "using: ");
+
+ if (!is_thinint_irq(irq_ptr))
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "AdapterInterrupts ");
+ if (!irq_ptr->sch_token)
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "QEBSM ");
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "OutboundPCI ");
+ if (!css_general_characteristics.aif_tdd)
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "TDD\n");
+ printk(KERN_INFO "qdio: %s", s);
+
+ memset(s, 0, sizeof(s));
+ sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
+ if (irq_ptr->siga_flag.input)
+ sprintf(s + strlen(s), "Read ");
+ if (irq_ptr->siga_flag.output)
+ sprintf(s + strlen(s), "Write ");
+ if (irq_ptr->siga_flag.sync)
+ sprintf(s + strlen(s), "Sync ");
+ if (!irq_ptr->siga_flag.no_sync_ti)
+ sprintf(s + strlen(s), "SyncAI ");
+ if (!irq_ptr->siga_flag.no_sync_out_ti)
+ sprintf(s + strlen(s), "SyncOutAI ");
+ if (!irq_ptr->siga_flag.no_sync_out_pci)
+ sprintf(s + strlen(s), "SyncOutPCI");
+ sprintf(s + strlen(s), "\n");
+ printk(KERN_INFO "qdio: %s", s);
+}
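For a hypothetical OSA device 0.0.4711 with thin interrupts, QEBSM, outbound PCI and TDD all available and only SIGA-write required, the two printks above would come out roughly as:

	qdio: 0.0.4711 OSADE using: AdapterInterrupts QEBSM OutboundPCI TDD
	qdio: 0.0.4711 SIGA required: Write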
+
+int __init qdio_setup_init(void)
+{
+ char dbf_text[15];
+
+ qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+ 256, 0, NULL);
+ if (!qdio_q_cache)
+ return -ENOMEM;
+
+ /* Check for OSA/FCP thin interrupts (bit 67). */
+ sprintf(dbf_text, "thini%1x",
+ (css_general_characteristics.aif_osa) ? 1 : 0);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ /* Check for QEBSM support in general (bit 58). */
+ sprintf(dbf_text, "cssQBS:%1x",
+ (qebsm_possible()) ? 1 : 0);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ return 0;
+}
+
+void __exit qdio_setup_exit(void)
+{
+ kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000000..9291a771d812
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,380 @@
+/*
+ * linux/drivers/s390/cio/qdio_thinint.c
+ *
+ * thin interrupt support for qdio
+ *
+ * Copyright 2000-2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <asm/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+#include "qdio_perf.h"
+
+/*
+ * Restriction: only 63 iqdio subchannels have their own indicator;
+ * all subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND 63
+#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND 63
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+
+/* adapter local summary indicator */
+static unsigned char *tiqdio_alsi;
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+static struct indicator_t *q_indicators;
+
+static void tiqdio_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
+
+static int css_qdio_omit_svs;
+
+static inline unsigned long do_clear_global_summary(void)
+{
+ register unsigned long __fn asm("1") = 3;
+ register unsigned long __tmp asm("2");
+ register unsigned long __time asm("3");
+
+ asm volatile(
+ " .insn rre,0xb2650000,2,0"
+ : "+d" (__fn), "=d" (__tmp), "=d" (__time));
+ return __time;
+}
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+ int i;
+
+ for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+ if (!atomic_read(&q_indicators[i].count)) {
+ atomic_set(&q_indicators[i].count, 1);
+ return &q_indicators[i].ind;
+ }
+
+ /* use the shared indicator */
+ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+ return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+ int i;
+
+ if (!addr)
+ return;
+ i = ((unsigned long)addr - (unsigned long)q_indicators) /
+ sizeof(struct indicator_t);
+ atomic_dec(&q_indicators[i].count);
+}
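A small sketch of the allocation scheme implemented by the two helpers above (illustrative, assuming no other users hold indicators):

	/*
	 * get_indicator() calls 1..63  ->  &q_indicators[0..62].ind  (count 0 -> 1)
	 * get_indicator() call 64+     ->  &q_indicators[63].ind     (shared, count++)
	 * put_indicator(addr)          ->  decrements the matching count again
	 */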
+
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
+ if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
+ css_qdio_omit_svs = 1;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ list_add_rcu(&q->entry, &tiq_list);
+ synchronize_rcu();
+ }
+ xchg(irq_ptr->dsci, 1);
+ tasklet_schedule(&tiqdio_tasklet);
+}
+
+/*
+ * we cannot stop the tiqdio tasklet here since it is for all
+ * thinint qdio devices and it must run as long as there is a
+ * thinint device left
+ */
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ list_del_rcu(&q->entry);
+ synchronize_rcu();
+ }
+}
+
+static inline int tiqdio_inbound_q_done(struct qdio_q *q)
+{
+ unsigned char state;
+
+ if (!atomic_read(&q->nr_buf_used))
+ return 1;
+
+ qdio_siga_sync_q(q);
+ get_buf_state(q, q->first_to_check, &state);
+
+ if (state == SLSB_P_INPUT_PRIMED)
+ /* more work coming */
+ return 0;
+ return 1;
+}
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qdio_sync_after_thinint(q);
+
+ /*
+ * Maybe we have work on our outbound queues... at least
+ * we have to check the PCI capable queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+again:
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_inbound_handler(q);
+
+ if (!tiqdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ goto again;
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!tiqdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ goto again;
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+
+ __tiqdio_inbound_processing(q);
+}
+
+/* check for work on all inbound thinint queues */
+static void tiqdio_tasklet_fn(unsigned long data)
+{
+ struct qdio_q *q;
+
+ qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
+again:
+
+ /* protect tiq_list entries, only changed in activate or shutdown */
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(q, &tiq_list, entry)
+ /* only process queues from changed sets */
+ if (*q->irq_ptr->dsci) {
+
+ /* only clear it if the indicator is non-shared */
+ if (!shared_ind(q->irq_ptr))
+ xchg(q->irq_ptr->dsci, 0);
+ /*
+ * don't call inbound processing directly since
+ * that could starve other thinint queues
+ */
+ tasklet_schedule(&q->tasklet);
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * if we used the shared indicator clear it now after all queues
+ * were processed
+ */
+ if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
+ xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+
+ /* prevent racing */
+ if (*tiqdio_alsi)
+ xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
+ }
+
+ /* check for more work */
+ if (*tiqdio_alsi) {
+ xchg(tiqdio_alsi, 0);
+ qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
+ goto again;
+ }
+}
+
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
+{
+ qdio_perf_stat_inc(&perf_stats.thin_int);
+
+ /*
+ * SVS only when needed: issue SVS to benefit from iqdio interrupt
+ * avoidance (SVS clears adapter interrupt suppression overwrite)
+ */
+ if (!css_qdio_omit_svs)
+ do_clear_global_summary();
+
+ /*
+ * reset local summary indicator (tiqdio_alsi) to stop adapter
+ * interrupts for now, the tasklet will clean all dsci's
+ */
+ xchg((u8 *)ind, 0);
+ tasklet_hi_schedule(&tiqdio_tasklet);
+}
+
+static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
+{
+ struct scssc_area *scssc_area;
+ char dbf_text[15];
+ void *ptr;
+ int rc;
+
+ scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
+ memset(scssc_area, 0, PAGE_SIZE);
+
+ if (reset) {
+ scssc_area->summary_indicator_addr = 0;
+ scssc_area->subchannel_indicator_addr = 0;
+ } else {
+ scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
+ scssc_area->subchannel_indicator_addr =
+ virt_to_phys(irq_ptr->dsci);
+ }
+
+ scssc_area->request = (struct chsc_header) {
+ .length = 0x0fe0,
+ .code = 0x0021,
+ };
+ scssc_area->operation_code = 0;
+ scssc_area->ks = PAGE_DEFAULT_KEY;
+ scssc_area->kc = PAGE_DEFAULT_KEY;
+ scssc_area->isc = QDIO_AIRQ_ISC;
+ scssc_area->schid = irq_ptr->schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc_area->word_with_d_bit = 0x10000000;
+
+ rc = chsc(scssc_area);
+ if (rc)
+ return -EIO;
+
+ rc = chsc_error_from_response(scssc_area->response.code);
+ if (rc) {
+ sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
+ QDIO_DBF_TEXT1(0, trace, dbf_text);
+ QDIO_DBF_TEXT1(0, setup, dbf_text);
+ ptr = &scssc_area->response;
+ QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
+ return rc;
+ }
+
+ QDIO_DBF_TEXT2(0, setup, "setscind");
+ QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
+ sizeof(unsigned long));
+ QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
+ sizeof(unsigned long));
+ return 0;
+}
+
+/* allocate non-shared indicators and shared indicator */
+int __init tiqdio_allocate_memory(void)
+{
+ q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
+ GFP_KERNEL);
+ if (!q_indicators)
+ return -ENOMEM;
+ return 0;
+}
+
+void tiqdio_free_memory(void)
+{
+ kfree(q_indicators);
+}
+
+int __init tiqdio_register_thinints(void)
+{
+ char dbf_text[20];
+
+ isc_register(QDIO_AIRQ_ISC);
+ tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
+ NULL, QDIO_AIRQ_ISC);
+ if (IS_ERR(tiqdio_alsi)) {
+ sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ tiqdio_alsi = NULL;
+ isc_unregister(QDIO_AIRQ_ISC);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+
+ /* Check for aif time delay disablement. If installed,
+ * omit SVS even under LPAR
+ */
+ if (css_general_characteristics.aif_tdd)
+ css_qdio_omit_svs = 1;
+ return set_subchannel_ind(irq_ptr, 0);
+}
+
+void qdio_setup_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+ irq_ptr->dsci = get_indicator();
+ QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
+}
+
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+
+ /* reset adapter interrupt indicators */
+ put_indicator(irq_ptr->dsci);
+ set_subchannel_ind(irq_ptr, 1);
+}
+
+void __exit tiqdio_unregister_thinints(void)
+{
+ tasklet_disable(&tiqdio_tasklet);
+
+ if (tiqdio_alsi) {
+ s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
+ isc_unregister(QDIO_AIRQ_ISC);
+ }
+}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 699ac11debd8..1895dbb553cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3
-#define QETH_MIN_INPUT_THRESHOLD 1
-#define QETH_MAX_INPUT_THRESHOLD 500
-#define QETH_MIN_OUTPUT_THRESHOLD 1
-#define QETH_MAX_OUTPUT_THRESHOLD 300
-
/* priority queing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2
@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
-int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
- unsigned int, const char *);
+int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
- unsigned int, unsigned int,
- unsigned int, int, int,
- unsigned long);
+ int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0ac54dc638c2..c3ad89e302bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 3, "qdioact");
- return qdio_activate(CARD_DDEV(card), 0);
+ return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
card->qdio.in_q->next_buf_to_init =
card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
- card->qdio.in_buf_pool.buf_count - 1, NULL);
+ card->qdio.in_buf_pool.buf_count - 1);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
return rc;
}
- rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- return rc;
- }
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0,
@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
- unsigned int siga_error, const char *dbftext)
+ const char *dbftext)
{
- if (qdio_error || siga_error) {
+ if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
QETH_DBF_TEXT(QERR, 2, dbftext);
QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
- QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
return 1;
}
return 0;
@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
card->perf_stats.inbound_do_qdio_start_time =
qeth_get_micros();
}
- rc = do_QDIO(CARD_DDEV(card),
- QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
- 0, queue->next_buf_to_init, count, NULL);
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
+ queue->next_buf_to_init, count);
if (card->options.performance_stats)
card->perf_stats.inbound_do_qdio_time +=
qeth_get_micros() -
@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
static int qeth_handle_send_error(struct qeth_card *card,
- struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
- unsigned int siga_err)
+ struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- int cc = siga_err & 3;
+ int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
- qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
+ qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
switch (cc) {
case 0:
if (qdio_err) {
@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
}
return QETH_SEND_ERROR_NONE;
case 2:
- if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
+ if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
return QETH_SEND_ERROR_KICK_IT;
@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
return 0;
}
-static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
- int index, int count)
+static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+ int count)
{
struct qeth_qdio_out_buffer *buf;
int rc;
@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
qeth_get_micros();
}
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
- if (under_int)
- qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
- queue->queue_no, index, count, NULL);
+ queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
if (flush_cnt)
- qeth_flush_buffers(queue, 1, index, flush_cnt);
+ qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
}
}
-void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
- unsigned int qdio_error, unsigned int siga_error,
- unsigned int __queue, int first_element, int count,
- unsigned long card_ptr)
+void qeth_qdio_output_handler(struct ccw_device *ccwdev,
+ unsigned int qdio_error, int __queue, int first_element,
+ int count, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
int i;
QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
- if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
- if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 2, "achkcond");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 2, "%08x", status);
- netif_stop_queue(card->dev);
- qeth_schedule_recovery(card);
- return;
- }
+ if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
+ QETH_DBF_TEXT(TRACE, 2, "achkcond");
+ QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ return;
}
if (card->options.performance_stats) {
card->perf_stats.outbound_handler_cnt++;
@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */
- if (qeth_handle_send_error(card, buffer,
- qdio_error, siga_error)
+ if (qeth_handle_send_error(card, buffer, qdio_error)
== QETH_SEND_ERROR_KICK_IT){
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (ctx == NULL) {
qeth_fill_buffer(queue, buffer, skb);
- qeth_flush_buffers(queue, 0, index, 1);
+ qeth_flush_buffers(queue, index, 1);
} else {
flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
WARN_ON(buffers_needed != flush_cnt);
- qeth_flush_buffers(queue, 0, index, flush_cnt);
+ qeth_flush_buffers(queue, index, flush_cnt);
}
return 0;
out:
@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* again */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){
- qeth_flush_buffers(queue, 0,
- start_index, flush_count);
+ qeth_flush_buffers(queue, start_index,
+ flush_count);
atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
return -EBUSY;
@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
flush_count += tmp;
out:
if (flush_count)
- qeth_flush_buffers(queue, 0, start_index, flush_count);
+ qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/*
@@ -3274,7 +3259,7 @@ out:
if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
flush_count += qeth_flush_buffers_on_no_pci(queue);
if (flush_count)
- qeth_flush_buffers(queue, 0, start_index, flush_count);
+ qeth_flush_buffers(queue, start_index, flush_count);
}
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = qeth_get_qdio_q_format(card);
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
- init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
- init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
- init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
- init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
init_data.no_input_qs = 1;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
+ struct qdio_ssqd_desc *qdio_ssqd;
int retries = 3;
- int mpno;
+ int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3784,7 +3766,10 @@ retry:
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
- mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
+
+ qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
+ if (qdio_ssqd)
+ mpno = qdio_ssqd->pcnt;
if (mpno)
mpno = min(mpno - 1, QETH_MAX_PORTNO);
if (card->info.portno > mpno) {
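[Illustrative sketch, not part of the patch] The hunk above replaces qdio_get_ssqd_pct(), which returned the port count directly, with qdio_get_ssqd_desc(), which returns the whole descriptor (or NULL). A caller now reads the pcnt field itself; example_max_portno() below is a hypothetical helper showing only what the hunk implies:

/*
 * Sketch of the new SSQD query as used above: qdio_get_ssqd_desc()
 * may return NULL, in which case the port count defaults to 0.
 */
static int example_max_portno(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc *ssqd;
	int pcnt = 0;

	ssqd = qdio_get_ssqd_desc(cdev);
	if (ssqd)
		pcnt = ssqd->pcnt;
	return pcnt;
}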
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f682f7b14480..3fbc3bdec0c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -726,8 +726,7 @@ tx_drop:
}
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
- unsigned int status, unsigned int qdio_err,
- unsigned int siga_err, unsigned int queue,
+ unsigned int qdio_err, unsigned int queue,
int first_element, int count, unsigned long card_ptr)
{
struct net_device *net_dev;
@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
- if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
- if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
- count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
- qeth_schedule_recovery(card);
- return;
- }
+ if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
+ QETH_DBF_TEXT(TRACE, 1, "qdinchk");
+ QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
+ count);
+ QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ qeth_schedule_recovery(card);
+ return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
- if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
- qeth_check_qdio_errors(buffer->buffer,
- qdio_err, siga_err, "qinerr")))
+ if (!(qdio_err &&
+ qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
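[Illustrative sketch, not part of the patch] As the qeth_l2 hunks above show, the separate status and siga_error words are gone from the queue handlers; a single qdio_err value is passed and an activate check condition is reported via QDIO_ERROR_ACTIVATE_CHECK_CONDITION. The handler below is a hypothetical minimal example mirroring that shape (buffer processing reduced to a comment):

static void example_input_handler(struct ccw_device *cdev,
		unsigned int qdio_err, unsigned int queue,
		int first_element, int count, unsigned long card_ptr)
{
	int i;

	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		/* device broke out of ACTIVATE; trigger recovery, stop here */
		return;
	}
	for (i = first_element; i < first_element + count; i++) {
		/* process buffer i % QDIO_MAX_BUFFERS_PER_Q, then
		 * hand it back to the hardware */
	}
}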
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06deaee50f6d..22f64aa6dd1f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
}
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
- unsigned int status, unsigned int qdio_err,
- unsigned int siga_err, unsigned int queue, int first_element,
+ unsigned int qdio_err, unsigned int queue, int first_element,
int count, unsigned long card_ptr)
{
struct net_device *net_dev;
@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
- if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
- if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
- first_element, count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
- qeth_schedule_recovery(card);
- return;
- }
+ if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
+ QETH_DBF_TEXT(TRACE, 1, "qdinchk");
+ QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
+ first_element, count);
+ QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ qeth_schedule_recovery(card);
+ return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
- if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
+ if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer,
- qdio_err, siga_err, "qinerr")))
+ qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 36169c6944fd..fca48b88fc53 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
/**
* zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
* @adapter: adapter affected by this QDIO related event
- * @status: as passed by qdio module
* @qdio_error: as passed by qdio module
- * @siga_error: as passed by qdio module
* @sbal_index: first buffer with error condition, as passed by qdio module
* @sbal_count: number of buffers affected, as passed by qdio module
*/
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
- unsigned int qdio_error, unsigned int siga_error,
- int sbal_index, int sbal_count)
+void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
+ unsigned int qdio_error, int sbal_index,
+ int sbal_count)
{
struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
unsigned long flags;
@@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
- r->u.qdio.status = status;
r->u.qdio.qdio_error = qdio_error;
- r->u.qdio.siga_error = siga_error;
r->u.qdio.sbal_index = sbal_index;
r->u.qdio.sbal_count = sbal_count;
debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
@@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
{
- zfcp_dbf_out(p, "status", "0x%08x", r->status);
zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
- zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
}
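[Illustrative sketch, not part of the patch] With status and siga_error dropped from the trace record, only the merged qdio_error word and the affected SBAL range are logged. example_trace_qdio_error() is a hypothetical wrapper showing the slimmed-down call as used in the zfcp handlers further below:

static void example_trace_qdio_error(struct zfcp_adapter *adapter,
				     unsigned int qdio_err,
				     int first, int count)
{
	if (qdio_err)
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
}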
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d04aea604974..0ddb18449d11 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
} __attribute__ ((packed));
struct zfcp_hba_dbf_record_qdio {
- u32 status;
u32 qdio_error;
- u32 siga_error;
u8 sbal_index;
u8 sbal_count;
} __attribute__ ((packed));
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8065b2b224b7..edfdb21591f3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
-extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
- unsigned int, unsigned int, unsigned int,
- int, int);
+extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
+ int);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 72e3094796d4..d6dbd653fde9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
}
-static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
- unsigned int qdio_err, unsigned int siga_err,
- unsigned int queue_no, int first, int count,
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
+ int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->req_q;
- if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
- zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
- first, count);
+ if (unlikely(qdio_err)) {
+ zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 140);
return;
}
@@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
count = atomic_read(&queue->count) + processed;
- retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
- 0, start, count, NULL);
+ retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
if (unlikely(retval)) {
atomic_set(&queue->count, count);
@@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
}
}
-static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
- unsigned int qdio_err, unsigned int siga_err,
- unsigned int queue_no, int first, int count,
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+ int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
@@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
volatile struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
- if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
- zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
- first, count);
+ if (unlikely(qdio_err)) {
+ zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 147);
return;
}
@@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
}
retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
- count, NULL);
+ count);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval;
@@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
init_data->qib_param_field = NULL;
init_data->input_slib_elements = NULL;
init_data->output_slib_elements = NULL;
- init_data->min_input_threshold = 1;
- init_data->max_input_threshold = 5000;
- init_data->min_output_threshold = 1;
- init_data->max_output_threshold = 1000;
init_data->no_input_qs = 1;
init_data->no_output_qs = 1;
init_data->input_handler = zfcp_qdio_int_resp;
@@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock(&req_q->lock);
- while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
- == -EINPROGRESS)
- ssleep(1);
+ qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&req_q->count);
@@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return -EIO;
}
- if (qdio_activate(adapter->ccw_device, 0)) {
+ if (qdio_activate(adapter->ccw_device)) {
dev_err(&adapter->ccw_device->dev,
"Activate of QDIO queues failed.\n");
goto failed_qdio;
@@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
- QDIO_MAX_BUFFERS_PER_Q, NULL)) {
+ QDIO_MAX_BUFFERS_PER_Q)) {
dev_err(&adapter->ccw_device->dev,
"Init of QDIO response queue failed.\n");
goto failed_qdio;
@@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return 0;
failed_qdio:
- while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
- == -EINPROGRESS)
- ssleep(1);
-
+ qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
return -EIO;
}
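[Illustrative sketch, not part of the patch] Taken together, the zfcp hunks above show the trimmed bring-up path: qdio_initialize() no longer takes threshold fields, qdio_activate() takes just the ccw device, do_QDIO() has lost the QDIO_FLAG_UNDER_INTERRUPT flag and its trailing buffer argument, and qdio_shutdown() no longer returns -EINPROGRESS, so the ssleep() retry loops disappear. example_bringup() is a hypothetical condensation of those steps with error handling reduced to a stub:

/* qdio definitions (do_QDIO, qdio_activate, QDIO_FLAG_*) come from
 * the s390 qdio header. */
static int example_bringup(struct ccw_device *cdev)
{
	if (qdio_activate(cdev))
		return -EIO;

	/* hand the whole input queue 0 to the hardware */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q)) {
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}
	return 0;
}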