author    Linus Torvalds <torvalds@linux-foundation.org>    2012-12-13 14:20:19 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-13 14:20:19 -0800
commit    c7708fac5a878d6e0f2de0aa19f9749cff4f707f (patch)
tree      21a59cbe503ca526697f7d0bce5e0e30980bcbc0
parent    3127f23f013eabe9b58132c05061684c49146ba3 (diff)
parent    6726a807c38d7fd09bc23a0adc738efec6ff9492 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 update from Martin Schwidefsky:
 "Add support to generate code for the latest machine zEC12, MOD and XOR
  instruction support for the BPF jit compiler, the dasd safe offline
  feature and the big one: the s390 architecture gets PCI support!!
  Right before the world ends on the 21st ;-)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (41 commits)
  s390/qdio: rename the misleading PCI flag of qdio devices
  s390/pci: remove obsolete email addresses
  s390/pci: speed up __iowrite64_copy by using pci store block insn
  s390/pci: enable NEED_DMA_MAP_STATE
  s390/pci: no msleep in potential IRQ context
  s390/pci: fix potential NULL pointer dereference in dma_free_seg_table()
  s390/pci: use kmem_cache_zalloc instead of kmem_cache_alloc/memset
  s390/bpf,jit: add support for XOR instruction
  s390/bpf,jit: add support MOD instruction
  s390/cio: fix pgid reserved check
  vga: compile fix, disable vga for s390
  s390/pci: add PCI Kconfig options
  s390/pci: s390 specific PCI sysfs attributes
  s390/pci: PCI hotplug support via SCLP
  s390/pci: CHSC PCI support for error and availability events
  s390/pci: DMA support
  s390/pci: PCI adapter interrupts for MSI/MSI-X
  s390/bitops: find leftmost bit instruction support
  s390/pci: CLP interface
  s390/pci: base support
  ...
-rw-r--r--  arch/s390/Kbuild | 1
-rw-r--r--  arch/s390/Kconfig | 70
-rw-r--r--  arch/s390/Makefile | 1
-rw-r--r--  arch/s390/crypto/aes_s390.c | 18
-rw-r--r--  arch/s390/crypto/des_s390.c | 12
-rw-r--r--  arch/s390/crypto/ghash_s390.c | 21
-rw-r--r--  arch/s390/crypto/sha_common.c | 9
-rw-r--r--  arch/s390/include/asm/bitops.h | 81
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 6
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 3
-rw-r--r--  arch/s390/include/asm/clp.h | 28
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 76
-rw-r--r--  arch/s390/include/asm/dma.h | 19
-rw-r--r--  arch/s390/include/asm/hw_irq.h | 22
-rw-r--r--  arch/s390/include/asm/io.h | 55
-rw-r--r--  arch/s390/include/asm/irq.h | 12
-rw-r--r--  arch/s390/include/asm/isc.h | 1
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/asm/pci.h | 156
-rw-r--r--  arch/s390/include/asm/pci_clp.h | 182
-rw-r--r--  arch/s390/include/asm/pci_dma.h | 196
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 280
-rw-r--r--  arch/s390/include/asm/pci_io.h | 194
-rw-r--r--  arch/s390/include/asm/pgtable.h | 11
-rw-r--r--  arch/s390/include/asm/sclp.h | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 34
-rw-r--r--  arch/s390/include/asm/vga.h | 6
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/dis.c | 578
-rw-r--r--  arch/s390/kernel/entry.S | 7
-rw-r--r--  arch/s390/kernel/entry.h | 21
-rw-r--r--  arch/s390/kernel/entry64.S | 36
-rw-r--r--  arch/s390/kernel/head.S | 74
-rw-r--r--  arch/s390/kernel/irq.c | 2
-rw-r--r--  arch/s390/kernel/pgm_check.S | 152
-rw-r--r--  arch/s390/kernel/setup.c | 39
-rw-r--r--  arch/s390/kernel/signal.c | 2
-rw-r--r--  arch/s390/kernel/topology.c | 113
-rw-r--r--  arch/s390/kernel/traps.c | 52
-rw-r--r--  arch/s390/mm/Makefile | 12
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 7
-rw-r--r--  arch/s390/mm/fault.c | 31
-rw-r--r--  arch/s390/mm/init.c | 29
-rw-r--r--  arch/s390/mm/pageattr.c | 82
-rw-r--r--  arch/s390/mm/pgtable.c | 16
-rw-r--r--  arch/s390/mm/vmem.c | 46
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 28
-rw-r--r--  arch/s390/pci/Makefile | 6
-rw-r--r--  arch/s390/pci/pci.c | 1103
-rw-r--r--  arch/s390/pci/pci_clp.c | 324
-rw-r--r--  arch/s390/pci/pci_dma.c | 506
-rw-r--r--  arch/s390/pci/pci_event.c | 93
-rw-r--r--  arch/s390/pci/pci_msi.c | 141
-rw-r--r--  arch/s390/pci/pci_sysfs.c | 86
-rw-r--r--  drivers/gpu/vga/Kconfig | 2
-rw-r--r--  drivers/pci/hotplug/Kconfig | 11
-rw-r--r--  drivers/pci/hotplug/Makefile | 1
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 252
-rw-r--r--  drivers/pci/msi.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 97
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 34
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 92
-rw-r--r--  drivers/s390/block/dasd_fba.c | 23
-rw-r--r--  drivers/s390/block/dasd_int.h | 2
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 11
-rw-r--r--  drivers/s390/char/sclp.h | 3
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 81
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 26
-rw-r--r--  drivers/s390/cio/chsc.c | 156
-rw-r--r--  drivers/s390/cio/device.c | 11
-rw-r--r--  drivers/s390/cio/device.h | 2
-rw-r--r--  drivers/s390/cio/device_ops.c | 17
-rw-r--r--  drivers/s390/cio/device_pgid.c | 10
-rw-r--r--  drivers/s390/cio/qdio_main.c | 52
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 9
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 68
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h | 2
-rw-r--r--  include/asm-generic/io.h | 21
-rw-r--r--  include/linux/irq.h | 10
80 files changed, 5313 insertions, 774 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index cc45d25487b0..647c3eccc3d0 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
+obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3cbb8757704e..32425af9d68d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,12 +34,6 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
-config NO_IOMEM
- def_bool y
-
-config NO_DMA
- def_bool y
-
config ARCH_DMA_ADDR_T_64BIT
def_bool 64BIT
@@ -58,6 +52,12 @@ config KEXEC
config AUDIT_ARCH
def_bool y
+config NO_IOPORT
+ def_bool y
+
+config PCI_QUIRKS
+ def_bool n
+
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
@@ -171,6 +171,10 @@ config HAVE_MARCH_Z196_FEATURES
def_bool n
select HAVE_MARCH_Z10_FEATURES
+config HAVE_MARCH_ZEC12_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z196_FEATURES
+
choice
prompt "Processor type"
default MARCH_G5
@@ -222,6 +226,13 @@ config MARCH_Z196
(2818 and 2817 series). The kernel will be slightly faster but will
not work on older machines.
+config MARCH_ZEC12
+ bool "IBM zEC12"
+ select HAVE_MARCH_ZEC12_FEATURES if 64BIT
+ help
+ Select this to enable optimizations for IBM zEC12 (2827 series). The
+ kernel will be slightly faster but will not work on older machines.
+
endchoice
config 64BIT
@@ -426,6 +437,53 @@ config QDIO
If unsure, say Y.
+menuconfig PCI
+ bool "PCI support"
+ default n
+ depends on 64BIT
+ select ARCH_SUPPORTS_MSI
+ select PCI_MSI
+ help
+ Enable PCI support.
+
+if PCI
+
+config PCI_NR_FUNCTIONS
+ int "Maximum number of PCI functions (1-4096)"
+ range 1 4096
+ default "64"
+ help
+ This allows you to specify the maximum number of PCI functions which
+ this kernel will support.
+
+source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+source "drivers/pci/hotplug/Kconfig"
+
+endif # PCI
+
+config PCI_DOMAINS
+ def_bool PCI
+
+config HAS_IOMEM
+ def_bool PCI
+
+config IOMMU_HELPER
+ def_bool PCI
+
+config HAS_DMA
+ def_bool PCI
+ select HAVE_DMA_API_DEBUG
+
+config NEED_SG_DMA_LENGTH
+ def_bool PCI
+
+config HAVE_DMA_ATTRS
+ def_bool PCI
+
+config NEED_DMA_MAP_STATE
+ def_bool PCI
+
config CHSC_SCH
def_tristate m
prompt "Support for CHSC subchannels"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 49e76e8b477d..4b8e08b56f49 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -41,6 +41,7 @@ cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
+cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index da3c1a7dcd8e..b4dbade8ca24 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
param = xts_ctx->pcc.key + offset;
ret = crypt_s390_pcc(func, param);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ return -EIO;
memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
param = xts_ctx->key + offset;
@@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON(ret < 0 || ret != n);
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- BUG_ON(ret < 0 || ret != n);
+ if (ret < 0 || ret != n)
+ return -EIO;
if (n > AES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
@@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+ if (ret < 0 || ret != AES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b49fb96f4207..bcca01c9989d 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, iv, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
if (n > DES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
@@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+ if (ret < 0 || ret != DES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 1ebd3a15cca4..d43485d142e9 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
- BUG_ON(ret != n);
+ if (ret != n)
+ return -EIO;
src += n;
srclen -= n;
}
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
dctx->bytes = 0;
+ return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ int ret;
- ghash_flush(ctx, dctx);
- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
- return 0;
+ ret = ghash_flush(ctx, dctx);
+ if (!ret)
+ memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+ return ret;
}
static struct shash_alg ghash_alg = {
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index bd37d09b9d3c..8620b0ec9c42 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
- BUG_ON(ret != bsize);
+ if (ret != bsize)
+ return -EIO;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
- BUG_ON(ret != (len & ~(bsize - 1)));
+ if (ret != (len & ~(bsize - 1)))
+ return -EIO;
data += ret;
len -= ret;
}
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
- BUG_ON(ret != end);
+ if (ret != end)
+ return -EIO;
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
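
The crypto conversions above all follow one pattern: a CPACF primitive is expected to process exactly the requested number of bytes, and anything else is now reported to the caller as -EIO instead of halting the kernel with BUG_ON(). A minimal sketch of that pattern, assuming the crypt_s390_km() wrapper from arch/s390/crypto/crypt_s390.h; the helper name below is hypothetical and not part of the patch.

/*
 * Illustrative sketch, not part of the patch: report a failed or short
 * CPACF operation as -EIO so a misbehaving coprocessor degrades to an
 * I/O error instead of a kernel crash.
 */
static int km_crypt_checked(long func, void *param, u8 *out, const u8 *in,
                            unsigned int n)
{
        int ret = crypt_s390_km(func, param, out, in, n);

        if (ret < 0 || ret != n)        /* hardware error or short result */
                return -EIO;
        return 0;
}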
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6f573890fb28..15422933c60b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -640,6 +640,87 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
}
#define find_first_bit find_first_bit
+/*
+ * Big endian variant whichs starts bit counting from left using
+ * the flogr (find leftmost one) instruction.
+ */
+static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
+{
+ register unsigned long bit asm("2") = val;
+ register unsigned long out asm("3");
+
+ asm volatile (
+ " .insn rre,0xb9830000,%[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return nr + bit;
+}
+
+/*
+ * 64 bit special left bitops format:
+ * order in memory:
+ * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
+ * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
+ * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
+ * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
+ * after that follows the next long with bit numbers
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
+ * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
+ * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
+ * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
+ * The reason for this bit ordering is the fact that
+ * the hardware sets bits in a bitmap starting at bit 0
+ * and we don't want to scan the bitmap from the 'wrong
+ * end'.
+ */
+static inline unsigned long find_first_bit_left(const unsigned long *addr,
+ unsigned long size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(addr, size);
+ bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
+}
+
+static inline int find_next_bit_left(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (__BITOPS_WORDSIZE - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / __BITOPS_WORDSIZE;
+ if (bit) {
+ set = __flo_word(0, *p & (~0UL << bit));
+ if (set >= size)
+ return size + offset;
+ if (set < __BITOPS_WORDSIZE)
+ return set + offset;
+ offset += __BITOPS_WORDSIZE;
+ size -= __BITOPS_WORDSIZE;
+ p++;
+ }
+ return offset + find_first_bit_left(p, size);
+}
+
+#define for_each_set_bit_left(bit, addr, size) \
+ for ((bit) = find_first_bit_left((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_left_cont(bit, addr, size) \
+ for ((bit) = find_next_bit_left((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+
/**
* find_next_zero_bit - find the first zero bit in a memory region
* @addr: The address to base the search on
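
The left-to-right bit numbering described in the comment above can be emulated on any host with a 64-bit unsigned long: bit 0 is the most significant bit of the first word, so the flogr-based __flo_word() amounts to counting leading zeros. A standalone sketch under that assumption; __builtin_clzl stands in for the flogr instruction and the helper names are made up.

/*
 * Illustrative sketch, not part of the patch: emulate the big-endian bit
 * numbering used by find_first_bit_left() with a leading-zero count.
 */
#include <stdio.h>

static unsigned long flo_word(unsigned long nr, unsigned long val)
{
    /* flogr yields the leftmost set bit (0 = MSB), or 64 for val == 0 */
    return nr + (val ? (unsigned long) __builtin_clzl(val) : 64);
}

int main(void)
{
    /* word 0 has bit 5 set, word 1 has bit 7 set (counted from the left) */
    unsigned long bitmap[2] = { 1UL << (63 - 5), 1UL << (63 - 7) };

    printf("first set bit: %lu\n", flo_word(0, bitmap[0]));   /* 5 */
    printf("next word bit: %lu\n", flo_word(64, bitmap[1]));  /* 71 = 0x47 */
    return 0;
}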
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 1cb4bb3f32d9..6d1f3573f0df 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -18,6 +18,9 @@ struct irb;
struct ccw1;
struct ccw_dev_id;
+/* from asm/schid.h */
+struct subchannel_id;
+
/* simplified initializers for struct ccw_device:
* CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
* entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */
@@ -223,8 +226,7 @@ extern int ccw_device_force_console(void);
int ccw_device_siosl(struct ccw_device *);
-// FIXME: these have to go
-extern int _ccw_device_get_subchannel_number(struct ccw_device *);
+extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 01a905eb11e0..23723ce5ca7a 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -59,6 +59,9 @@ extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
+extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
+extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
new file mode 100644
index 000000000000..6c3aecc245ff
--- /dev/null
+++ b/arch/s390/include/asm/clp.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_S390_CLP_H
+#define _ASM_S390_CLP_H
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE (PAGE_SIZE * 2)
+
+struct clp_req_hdr {
+ u16 len;
+ u16 cmd;
+} __packed;
+
+struct clp_rsp_hdr {
+ u16 len;
+ u16 rsp;
+} __packed;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request successfully */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+#endif
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..8a32f7dfd3af
--- /dev/null
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_S390_DMA_MAPPING_H
+#define _ASM_S390_DMA_MAPPING_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
+#include <linux/io.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
+
+extern struct dma_map_ops s390_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &s390_dma_ops;
+}
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->dma_supported == NULL)
+ return 1;
+ return dma_ops->dma_supported(dev, mask);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+ return (dma_addr == 0UL);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *ret;
+
+ ret = ops->alloc(dev, size, dma_handle, flag, NULL);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+ return ret;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+#endif /* _ASM_S390_DMA_MAPPING_H */
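
From a driver's perspective the new header makes the coherent DMA API on s390 look like it does on any other architecture; every call is routed through s390_dma_ops. A hedged kernel-context usage sketch follows; the device, size and helper name are made up and not part of the patch.

/*
 * Illustrative sketch, not part of the patch: typical driver-side use of
 * the coherent DMA API that this header now provides on s390.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static int example_setup_ring(struct device *dev)        /* hypothetical */
{
        dma_addr_t ring_dma;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program ring_dma into the adapter, access ring from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}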
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 6fb6de4f15b0..de015d85e3e5 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -1,14 +1,13 @@
-/*
- * S390 version
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
+#ifndef _ASM_S390_DMA_H
+#define _ASM_S390_DMA_H
-#include <asm/io.h> /* need byte IO */
+#include <asm/io.h>
+/*
+ * MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated
+ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
+ * by the 31 bit heritage.
+ */
#define MAX_DMA_ADDRESS 0x80000000
-#define free_dma(x) do { } while (0)
-
-#endif /* _ASM_DMA_H */
+#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
new file mode 100644
index 000000000000..7e3d2586c1ff
--- /dev/null
+++ b/arch/s390/include/asm/hw_irq.h
@@ -0,0 +1,22 @@
+#ifndef _HW_IRQ_H
+#define _HW_IRQ_H
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
+{
+ return __irq_get_msi_desc(irq);
+}
+
+/* Must be called with msi map lock held */
+static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
+{
+ if (!msi)
+ return -EINVAL;
+
+ msi->irq = irq;
+ return 0;
+}
+
+#endif
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 559e921a6bba..16c3eb164f4f 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -9,9 +9,9 @@
#ifndef _S390_IO_H
#define _S390_IO_H
+#include <linux/kernel.h>
#include <asm/page.h>
-
-#define IO_SPACE_LIMIT 0xffffffff
+#include <asm/pci_io.h>
/*
* Change virtual addresses to physical addresses and vv.
@@ -24,10 +24,11 @@ static inline unsigned long virt_to_phys(volatile void * address)
" lra %0,0(%1)\n"
" jz 0f\n"
" la %0,0\n"
- "0:"
+ "0:"
: "=a" (real_address) : "a" (address) : "cc");
- return real_address;
+ return real_address;
}
+#define virt_to_phys virt_to_phys
static inline void * phys_to_virt(unsigned long address)
{
@@ -42,4 +43,50 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
*/
#define xlate_dev_kmem_ptr(p) p
+#define IO_SPACE_LIMIT 0
+
+#ifdef CONFIG_PCI
+
+#define ioremap_nocache(addr, size) ioremap(addr, size)
+#define ioremap_wc ioremap_nocache
+
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ return (void __iomem *) offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+}
+
+/*
+ * s390 needs a private implementation of pci_iomap since ioremap with its
+ * offset parameter isn't sufficient. That's because BAR spaces are not
+ * disjunctive on s390 so we need the bar parameter of pci_iomap to find
+ * the corresponding device and create the mapping cookie.
+ */
+#define pci_iomap pci_iomap
+#define pci_iounmap pci_iounmap
+
+#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
+#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
+#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
+
+#define __raw_readb zpci_read_u8
+#define __raw_readw zpci_read_u16
+#define __raw_readl zpci_read_u32
+#define __raw_readq zpci_read_u64
+#define __raw_writeb zpci_write_u8
+#define __raw_writew zpci_write_u16
+#define __raw_writel zpci_write_u32
+#define __raw_writeq zpci_write_u64
+
+#endif /* CONFIG_PCI */
+
+#include <asm-generic/io.h>
+
#endif
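
The comment above is why pci_iomap() on s390 cannot hand back a plain address: the cookie has to identify the function and BAR as well as the offset within it. A standalone sketch of a cookie layout consistent with the ZPCI_IOMAP_* masks defined in asm/pci_io.h later in this diff; the index and offset values are made up.

/*
 * Illustrative sketch, not part of the patch: an iomap cookie that encodes
 * an iomap-table index plus a BAR offset instead of a real address.
 */
#include <stdio.h>
#include <stdint.h>

#define ZPCI_IOMAP_ADDR_BASE        0x8000000000000000ULL
#define ZPCI_IOMAP_ADDR_IDX_MASK    0x7fff000000000000ULL
#define ZPCI_IOMAP_ADDR_OFF_MASK    0x0000ffffffffffffULL

int main(void)
{
    uint64_t idx = 42, bar_offset = 0x100;    /* made-up values */
    uint64_t cookie = ZPCI_IOMAP_ADDR_BASE | (idx << 48) | bar_offset;

    printf("cookie %#llx -> idx %llu, offset %#llx\n",
           (unsigned long long) cookie,
           (unsigned long long) ((cookie & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48),
           (unsigned long long) (cookie & ZPCI_IOMAP_ADDR_OFF_MASK));
    return 0;
}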
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 6703dd986fd4..e6972f85d2b0 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -33,6 +33,8 @@ enum interruption_class {
IOINT_APB,
IOINT_ADM,
IOINT_CSC,
+ IOINT_PCI,
+ IOINT_MSI,
NMI_NMI,
NR_IRQS,
};
@@ -51,4 +53,14 @@ void service_subclass_irq_unregister(void);
void measurement_alert_subclass_register(void);
void measurement_alert_subclass_unregister(void);
+#ifdef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
+# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
+ disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq) disable_irq(irq)
+# define enable_irq_lockdep(irq) enable_irq(irq)
+# define enable_irq_lockdep_irqrestore(irq, flags) \
+ enable_irq(irq)
+#endif
+
#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 5ae606456b0a..68d7d68300f2 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -18,6 +18,7 @@
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
+#define PCI_ISC 2 /* PCI I/O subchannels */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 39faa4ac9660..a86ad4084073 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,6 +30,8 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
+void storage_key_init_range(unsigned long start, unsigned long end);
+
static unsigned long pfmf(unsigned long function, unsigned long address)
{
asm volatile(
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 42a145c9ddd6..a6175ad0c42f 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -1,10 +1,158 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
-/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
- * includes it even if CONFIG_PCI is not set.
- */
+/* must be set before including asm-generic/pci.h */
#define PCI_DMA_BUS_IS_PHYS (0)
+/* must be set before including pci_clp.h */
+#define PCI_BAR_COUNT 6
-#endif /* __ASM_S390_PCI_H */
+#include <asm-generic/pci.h>
+#include <asm-generic/pci-dma-compat.h>
+#include <asm/pci_clp.h>
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+#define pcibios_assign_all_busses() (0)
+
+void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
+void pci_iounmap(struct pci_dev *, void __iomem *);
+int pci_domain_nr(struct pci_bus *);
+int pci_proc_domain(struct pci_bus *);
+
+/* MSI arch hooks */
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+
+#define ZPCI_BUS_NR 0 /* default bus number */
+#define ZPCI_DEVFN 0 /* default device number */
+
+/* PCI Function Controls */
+#define ZPCI_FC_FN_ENABLED 0x80
+#define ZPCI_FC_ERROR 0x40
+#define ZPCI_FC_BLOCKED 0x20
+#define ZPCI_FC_DMA_ENABLED 0x10
+
+struct msi_map {
+ unsigned long irq;
+ struct msi_desc *msi;
+ struct hlist_node msi_chain;
+};
+
+#define ZPCI_NR_MSI_VECS 64
+#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
+
+enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+ ZPCI_FN_STATE_STANDBY,
+ ZPCI_FN_STATE_CONFIGURED,
+ ZPCI_FN_STATE_ONLINE,
+ NR_ZPCI_FN_STATES,
+};
+
+struct zpci_bar_struct {
+ u32 val; /* bar start & 3 flag bits */
+ u8 size; /* order 2 exponent */
+ u16 map_idx; /* index into bar mapping array */
+};
+
+/* Private data per function */
+struct zpci_dev {
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+
+ enum zpci_state state;
+ u32 fid; /* function ID, used by sclp */
+ u32 fh; /* function handle, used by insn's */
+ u16 pchid; /* physical channel ID */
+ u8 pfgid; /* function group ID */
+ u16 domain;
+
+ /* IRQ stuff */
+ u64 msi_addr; /* MSI address */
+ struct zdev_irq_map *irq_map;
+ struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
+ unsigned int aisb; /* number of the summary bit */
+
+ /* DMA stuff */
+ unsigned long *dma_table;
+ spinlock_t dma_table_lock;
+ int tlb_refresh;
+
+ spinlock_t iommu_bitmap_lock;
+ unsigned long *iommu_bitmap;
+ unsigned long iommu_size;
+ unsigned long iommu_pages;
+ unsigned int next_bit;
+
+ struct zpci_bar_struct bars[PCI_BAR_COUNT];
+
+ u64 start_dma; /* Start of available DMA addresses */
+ u64 end_dma; /* End of available DMA addresses */
+ u64 dma_mask; /* DMA address space mask */
+
+ enum pci_bus_speed max_bus_speed;
+};
+
+struct pci_hp_callback_ops {
+ int (*create_slot) (struct zpci_dev *zdev);
+ void (*remove_slot) (struct zpci_dev *zdev);
+};
+
+static inline bool zdev_enabled(struct zpci_dev *zdev)
+{
+ return (zdev->fh & (1UL << 31)) ? true : false;
+}
+
+/* -----------------------------------------------------------------------------
+ Prototypes
+----------------------------------------------------------------------------- */
+/* Base stuff */
+struct zpci_dev *zpci_alloc_device(void);
+int zpci_create_device(struct zpci_dev *);
+int zpci_enable_device(struct zpci_dev *);
+void zpci_stop_device(struct zpci_dev *);
+void zpci_free_device(struct zpci_dev *);
+int zpci_scan_device(struct zpci_dev *);
+int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+int zpci_unregister_ioat(struct zpci_dev *, u8);
+
+/* CLP */
+int clp_find_pci_devices(void);
+int clp_add_pci_device(u32, u32, int);
+int clp_enable_fh(struct zpci_dev *, u8);
+int clp_disable_fh(struct zpci_dev *);
+
+/* MSI */
+struct msi_desc *__irq_get_msi_desc(unsigned int);
+int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
+int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
+void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
+int zpci_msihash_init(void);
+void zpci_msihash_exit(void);
+
+/* Error handling and recovery */
+void zpci_event_error(void *);
+void zpci_event_availability(void *);
+
+/* Helpers */
+struct zpci_dev *get_zdev(struct pci_dev *);
+struct zpci_dev *get_zdev_by_fid(u32);
+bool zpci_fid_present(u32);
+
+/* sysfs */
+int zpci_sysfs_add_device(struct device *);
+void zpci_sysfs_remove_device(struct device *);
+
+/* DMA */
+int zpci_dma_init(void);
+void zpci_dma_exit(void);
+
+/* Hotplug */
+extern struct mutex zpci_list_lock;
+extern struct list_head zpci_list;
+extern struct pci_hp_callback_ops hotplug_ops;
+extern unsigned int pci_probe;
+
+#endif
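
The declarations above expose a global list of zPCI functions (zpci_list, protected by zpci_list_lock) that the hotplug and event code walks. A hedged kernel-context sketch of such a walk; the helper name and message are made up and not part of the patch.

/*
 * Illustrative sketch, not part of the patch: walking the global zPCI
 * function list declared above under its mutex.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <asm/pci.h>

static void example_dump_functions(void)        /* hypothetical */
{
        struct zpci_dev *zdev;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(zdev, &zpci_list, entry)
                pr_info("zPCI fid %08x fh %08x %s\n", zdev->fid, zdev->fh,
                        zdev_enabled(zdev) ? "enabled" : "disabled");
        mutex_unlock(&zpci_list_lock);
}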
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
new file mode 100644
index 000000000000..d31d739f8689
--- /dev/null
+++ b/arch/s390/include/asm/pci_clp.h
@@ -0,0 +1,182 @@
+#ifndef _ASM_S390_PCI_CLP_H
+#define _ASM_S390_PCI_CLP_H
+
+#include <asm/clp.h>
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+struct clp_fh_list_entry {
+ u16 device_id;
+ u16 vendor_id;
+ u32 config_state : 1;
+ u32 : 31;
+ u32 fid; /* PCI function id */
+ u32 fh; /* PCI function handle */
+} __packed;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(struct clp_fh_list_entry))
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+
+/* List PCI functions request */
+struct clp_req_list_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u64 reserved2;
+} __packed;
+
+/* List PCI functions response */
+struct clp_rsp_list_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u32 reserved2;
+ u16 max_fn;
+ u8 reserved3;
+ u8 entry_size;
+ struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} __packed;
+
+/* Query PCI function request */
+struct clp_req_query_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function response */
+struct clp_rsp_query_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u16 vfn; /* virtual fn number */
+ u16 : 7;
+ u16 util_str_avail : 1; /* utility string available? */
+ u16 pfgid : 8; /* pci function group id */
+ u32 fid; /* pci function id */
+ u8 bar_size[PCI_BAR_COUNT];
+ u16 pchid;
+ u32 bar[PCI_BAR_COUNT];
+ u64 reserved2;
+ u64 sdma; /* start dma as */
+ u64 edma; /* end dma as */
+ u64 reserved3[6];
+ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} __packed;
+
+/* Query PCI function group request */
+struct clp_req_query_pci_grp {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 : 24;
+ u32 pfgid : 8; /* function group id */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function group response */
+struct clp_rsp_query_pci_grp {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u16 : 4;
+ u16 noi : 12; /* number of interrupts */
+ u8 version;
+ u8 : 6;
+ u8 frame : 1;
+ u8 refresh : 1; /* TLB refresh mode */
+ u16 reserved2;
+ u16 mui;
+ u64 reserved3;
+ u64 dasm; /* dma address space mask */
+ u64 msia; /* MSI address */
+ u64 reserved4;
+ u64 reserved5;
+} __packed;
+
+/* Set PCI function request */
+struct clp_req_set_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u16 reserved2;
+ u8 oc; /* operation controls */
+ u8 ndas; /* number of dma spaces */
+ u64 reserved3;
+} __packed;
+
+/* Set PCI function response */
+struct clp_rsp_set_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved3;
+ u64 reserved4;
+} __packed;
+
+/* Combined request/response block structures used by clp insn */
+struct clp_req_rsp_list_pci {
+ struct clp_req_list_pci request;
+ struct clp_rsp_list_pci response;
+} __packed;
+
+struct clp_req_rsp_set_pci {
+ struct clp_req_set_pci request;
+ struct clp_rsp_set_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci {
+ struct clp_req_query_pci request;
+ struct clp_rsp_query_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci_grp {
+ struct clp_req_query_pci_grp request;
+ struct clp_rsp_query_pci_grp response;
+} __packed;
+
+#endif
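
With the usual 4 KiB page size, CLP_BLK_SIZE is 8 KiB, so after subtracting two 32-byte headers a CLP_LIST_PCI response can carry 508 of the 16-byte function handle entries. A standalone check of that arithmetic; the page size and entry size are assumptions spelled out as constants.

/*
 * Illustrative sketch, not part of the patch: the CLP_FH_LIST_NR_ENTRIES
 * arithmetic with 4 KiB pages and 16-byte clp_fh_list_entry assumed.
 */
#include <stdio.h>

#define PAGE_SIZE           4096UL                /* assumption */
#define CLP_BLK_SIZE        (PAGE_SIZE * 2)
#define LIST_PCI_HDR_LEN    32
#define FH_LIST_ENTRY_SIZE  16                    /* sizeof(struct clp_fh_list_entry) */

int main(void)
{
    unsigned long n = (CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) / FH_LIST_ENTRY_SIZE;

    printf("function handles per LIST PCI response: %lu\n", n);    /* 508 */
    return 0;
}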
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
new file mode 100644
index 000000000000..30b4c179c38c
--- /dev/null
+++ b/arch/s390/include/asm/pci_dma.h
@@ -0,0 +1,196 @@
+#ifndef _ASM_S390_PCI_DMA_H
+#define _ASM_S390_PCI_DMA_H
+
+/* I/O Translation Anchor (IOTA) */
+enum zpci_ioat_dtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800UL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffUL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffUL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffUL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffUL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
+{
+ *entry &= ZPCI_PTE_FLAG_MASK;
+ *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, void *sto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, void *pto)
+{
+ *entry &= ZPCI_STE_FLAG_MASK;
+ *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_table_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_INVALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline int entry_isprotected(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
+ : NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
+ : NULL;
+}
+
+/* Prototypes */
+int zpci_dma_init_device(struct zpci_dev *);
+void zpci_dma_exit_device(struct zpci_dev *);
+
+#endif
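
The calc_rtx/calc_sx/calc_px helpers above slice a DMA address into region-third-table, segment-table and page-table indices (11, 11 and 8 bits wide) sitting above the page offset. A standalone worked example, assuming 4 KiB pages (PAGE_SHIFT = 12) and a made-up address.

/*
 * Illustrative sketch, not part of the patch: how calc_rtx/calc_sx/calc_px
 * split a DMA address into I/O translation table indices.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12                                    /* assumption */
#define ZPCI_PT_BITS    8
#define ZPCI_TABLE_BITS 11
#define ZPCI_ST_SHIFT   (ZPCI_PT_BITS + PAGE_SHIFT)           /* 20 */
#define ZPCI_RT_SHIFT   (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)     /* 31 */
#define ZPCI_INDEX_MASK 0x7ffUL
#define ZPCI_PT_MASK    ((1UL << ZPCI_PT_BITS) - 1)

int main(void)
{
    uint64_t dma = 0x292345678ULL;    /* made-up DMA address */

    printf("rtx=%#llx sx=%#llx px=%#llx offset=%#llx\n",
           (unsigned long long) ((dma >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK),   /* 0x5 */
           (unsigned long long) ((dma >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK),   /* 0x123 */
           (unsigned long long) ((dma >> PAGE_SHIFT) & ZPCI_PT_MASK),         /* 0x45 */
           (unsigned long long) (dma & ((1UL << PAGE_SHIFT) - 1)));           /* 0x678 */
    return 0;
}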
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
new file mode 100644
index 000000000000..1486a98d5dad
--- /dev/null
+++ b/arch/s390/include/asm/pci_insn.h
@@ -0,0 +1,280 @@
+#ifndef _ASM_S390_PCI_INSN_H
+#define _ASM_S390_PCI_INSN_H
+
+#include <linux/delay.h>
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Load/Store address space identifiers */
+#define ZPCI_PCIAS_MEMIO_0 0
+#define ZPCI_PCIAS_MEMIO_1 1
+#define ZPCI_PCIAS_MEMIO_2 2
+#define ZPCI_PCIAS_MEMIO_3 3
+#define ZPCI_PCIAS_MEMIO_4 4
+#define ZPCI_PCIAS_MEMIO_5 5
+#define ZPCI_PCIAS_CFGSPC 15
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+struct zpci_fib {
+ u32 fmt : 8; /* format */
+ u32 : 24;
+ u32 reserved1;
+ u8 fc; /* function controls */
+ u8 reserved2;
+ u16 reserved3;
+ u32 reserved4;
+ u64 pba; /* PCI base address */
+ u64 pal; /* PCI address limit */
+ u64 iota; /* I/O Translation Anchor */
+ u32 : 1;
+ u32 isc : 3; /* Interrupt subclass */
+ u32 noi : 12; /* Number of interrupts */
+ u32 : 2;
+ u32 aibvo : 6; /* Adapter interrupt bit vector offset */
+ u32 sum : 1; /* Adapter int summary bit enabled */
+ u32 : 1;
+ u32 aisbo : 6; /* Adapter int summary bit offset */
+ u32 reserved5;
+ u64 aibv; /* Adapter int bit vector address */
+ u64 aisb; /* Adapter int summary bit address */
+ u64 fmb_addr; /* Function measurement block address and key */
+ u64 reserved6;
+ u64 reserved7;
+} __packed;
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Store PCI function controls */
+static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
+{
+ u64 fn = (u64) handle << 32 | space << 16;
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
+ : : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+/* Set Interruption Controls */
+static inline void sic_instr(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ u64 __data;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc) {
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ /* TODO: on IO errors set data to 0xff...
+ * here or in users of pcilg (le conversion)?
+ */
+ }
+ return (cc) ? -EIO : 0;
+}
+
+/* PCI Store */
+static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int pcistg_instr(u64 data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc) ? -EIO : 0;
+}
+
+/* PCI Store Block */
+static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc) ? -EIO : 0;
+}
+
+#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
new file mode 100644
index 000000000000..5fd81f31d6c7
--- /dev/null
+++ b/arch/s390/include/asm/pci_io.h
@@ -0,0 +1,194 @@
+#ifndef _ASM_S390_PCI_IO_H
+#define _ASM_S390_PCI_IO_H
+
+#ifdef CONFIG_PCI
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/pci_insn.h>
+
+/* I/O Map */
+#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
+#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
+#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
+#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
+
+struct zpci_iomap_entry {
+ u32 fh;
+ u8 bar;
+};
+
+extern struct zpci_iomap_entry *zpci_iomap_start;
+
+#define ZPCI_IDX(addr) \
+ (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+#define ZPCI_OFFSET(addr) \
+ ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
+
+#define ZPCI_CREATE_REQ(handle, space, len) \
+ ((u64) handle << 32 | space << 16 | len)
+
+#define zpci_read(LENGTH, RETTYPE) \
+static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data; \
+ int rc; \
+ \
+ rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ if (rc) \
+ data = -1ULL; \
+ return (RETTYPE) data; \
+}
+
+#define zpci_write(LENGTH, VALTYPE) \
+static inline void zpci_write_##VALTYPE(VALTYPE val, \
+ const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data = (VALTYPE) val; \
+ \
+ pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+}
+
+zpci_read(8, u64)
+zpci_read(4, u32)
+zpci_read(2, u16)
+zpci_read(1, u8)
+zpci_write(8, u64)
+zpci_write(4, u32)
+zpci_write(2, u16)
+zpci_write(1, u8)
+
+static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+{
+ u64 val;
+
+ switch (len) {
+ case 1:
+ val = (u64) *((u8 *) data);
+ break;
+ case 2:
+ val = (u64) *((u16 *) data);
+ break;
+ case 4:
+ val = (u64) *((u32 *) data);
+ break;
+ case 8:
+ val = (u64) *((u64 *) data);
+ break;
+ default:
+ val = 0; /* let FW report error */
+ break;
+ }
+ return pcistg_instr(val, req, offset);
+}
+
+static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+{
+ u64 data;
+ u8 cc;
+
+ cc = pcilg_instr(&data, req, offset);
+ switch (len) {
+ case 1:
+ *((u8 *) dst) = (u8) data;
+ break;
+ case 2:
+ *((u16 *) dst) = (u16) data;
+ break;
+ case 4:
+ *((u32 *) dst) = (u32) data;
+ break;
+ case 8:
+ *((u64 *) dst) = (u64) data;
+ break;
+ }
+ return cc;
+}
+
+static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
+{
+ return pcistb_instr(data, req, offset);
+}
+
+static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+{
+ int count = len > max ? max : len, size = 1;
+
+ while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+ dst = dst >> 1;
+ src = src >> 1;
+ size = size << 1;
+ }
+ return size;
+}
+
+static inline int zpci_memcpy_fromio(void *dst,
+ const volatile void __iomem *src,
+ unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
+ u64 req, offset = ZPCI_OFFSET(src);
+ int size, rc = 0;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+ rc = zpci_read_single(req, dst, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ dst += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ const void *src, unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req, offset = ZPCI_OFFSET(dst);
+ int size, rc = 0;
+
+ if (!src)
+ return -EINVAL;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+
+ if (size > 8) /* main path */
+ rc = zpci_write_block(req, src, offset);
+ else
+ rc = zpci_write_single(req, src, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ src += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memset_io(volatile void __iomem *dst,
+ unsigned char val, size_t count)
+{
+ u8 *src = kmalloc(count, GFP_KERNEL);
+ int rc;
+
+ if (src == NULL)
+ return -ENOMEM;
+ memset(src, val, count);
+
+ rc = zpci_memcpy_toio(dst, src, count);
+ kfree(src);
+ return rc;
+}
+
+#endif /* CONFIG_PCI */
+
+#endif /* _ASM_S390_PCI_IO_H */
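
zpci_get_max_write_size() above picks the largest power-of-two chunk that fits the remaining length (capped at max) and to which both addresses are aligned; zpci_memcpy_toio() then takes the PCI store block path for chunks larger than 8 bytes. A standalone copy of that calculation with made-up addresses.

/*
 * Illustrative sketch, not part of the patch: the chunk-size calculation
 * used by zpci_memcpy_toio()/zpci_memcpy_fromio(). The result is the
 * largest power of two <= min(len, max) that both addresses are aligned to.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int max_write_size(uint64_t src, uint64_t dst, int len, int max)
{
    int count = len > max ? max : len, size = 1;

    while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
        dst >>= 1;
        src >>= 1;
        size <<= 1;
    }
    return size;
}

int main(void)
{
    /* dst is 16-byte aligned, src only 8-byte aligned -> 8-byte chunks */
    printf("%u\n", max_write_size(0x1008, 0x2010, 100, 128));    /* 8 */
    /* both 128-byte aligned with plenty left -> full 128-byte store block */
    printf("%u\n", max_write_size(0x1080, 0x2000, 512, 128));    /* 128 */
    return 0;
}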
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c814e6f5b57d..c928dc1938f2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -35,7 +35,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
-extern void fault_init(void);
/*
* The S390 doesn't have any external MMU info: the kernel page
@@ -336,6 +335,8 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
+
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
@@ -435,6 +436,7 @@ static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* CONFIG_64BIT */
@@ -480,6 +482,13 @@ static inline int pud_none(pud_t pud)
return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}
+static inline int pud_large(pud_t pud)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
+ return 0;
+ return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
+}
+
static inline int pud_bad(pud_t pud)
{
/*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e62a555557ee..833788693f09 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -55,5 +55,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
bool sclp_has_linemode(void);
bool sclp_has_vt220(void);
+int sclp_pci_configure(u32 fid);
+int sclp_pci_deconfigure(u32 fid);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9935cbd6a46f..05425b18c0aa 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -8,32 +8,34 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
-extern unsigned char cpu_socket_id[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+struct cpu_topology_s390 {
+ unsigned short core_id;
+ unsigned short socket_id;
+ unsigned short book_id;
+ cpumask_t core_mask;
+ cpumask_t book_mask;
+};
+
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
-extern unsigned char cpu_core_id[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+#define mc_capable() 1
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_topology[cpu].core_mask;
}
-#define topology_core_id(cpu) (cpu_core_id[cpu])
-#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define mc_capable() (1)
-
-extern unsigned char cpu_book_id[NR_CPUS];
-extern cpumask_t cpu_book_map[NR_CPUS];
-
static inline const struct cpumask *cpu_book_mask(int cpu)
{
- return &cpu_book_map[cpu];
+ return &cpu_topology[cpu].book_mask;
}
-#define topology_book_id(cpu) (cpu_book_id[cpu])
-#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
-
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
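
The per-CPU core/socket/book arrays are folded into a single struct cpu_topology_s390, and the topology_* macros now index that array. A short sketch of how a consumer reads the new layout; the loop and printing are illustrative, the struct and macros are taken from the hunk above.

/*
 * Sketch: dump the placement of every possible CPU via the
 * consolidated cpu_topology[] accessors. for_each_possible_cpu()
 * and pr_info() are standard kernel facilities, not part of this
 * patch.
 */
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/topology.h>

static void sketch_dump_topology(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		pr_info("cpu %d: core %u socket %u book %u\n", cpu,
			topology_core_id(cpu),
			topology_physical_package_id(cpu),
			topology_book_id(cpu));
}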
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
new file mode 100644
index 000000000000..d375526c261f
--- /dev/null
+++ b/arch/s390/include/asm/vga.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_VGA_H
+#define _ASM_S390_VGA_H
+
+/* Avoid compile errors due to missing asm/vga.h */
+
+#endif /* _ASM_S390_VGA_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4da52fe31743..2ac311ef5c9b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
+ sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f00286bd2ef9..a7f9abd98cf2 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -83,22 +83,29 @@ enum {
U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */
U4_20, /* 4 bit unsigned value starting at 20 */
+ U4_24, /* 4 bit unsigned value starting at 24 */
+ U4_28, /* 4 bit unsigned value starting at 28 */
U4_32, /* 4 bit unsigned value starting at 32 */
+ U4_36, /* 4 bit unsigned value starting at 36 */
U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
U8_32, /* 8 bit unsigned value starting at 32 */
I8_8, /* 8 bit signed value starting at 8 */
I8_32, /* 8 bit signed value starting at 32 */
+ J12_12, /* PC relative offset at 12 */
I16_16, /* 16 bit signed value starting at 16 */
I16_32, /* 32 bit signed value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
U16_32, /* 32 bit unsigned value starting at 16 */
J16_16, /* PC relative jump offset at 16 */
+ J16_32, /* PC relative offset at 16 */
+ I24_24, /* 24 bit signed value starting at 24 */
J32_16, /* PC relative long offset at 16 */
I32_16, /* 32 bit signed value starting at 16 */
U32_16, /* 32 bit unsigned value starting at 16 */
M_16, /* 4 bit optional mask starting at 16 */
+ M_20, /* 4 bit optional mask starting at 20 */
RO_28, /* optional GPR starting at position 28 */
};
@@ -109,6 +116,8 @@ enum {
enum {
INSTR_INVALID,
INSTR_E,
+ INSTR_IE_UU,
+ INSTR_MII_UPI,
INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
@@ -118,13 +127,15 @@ enum {
INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
INSTR_RRE_RR, INSTR_RRE_RR_OPT,
INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
- INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
- INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
- INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
+ INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR,
+ INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR,
+ INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF,
+ INSTR_RRF_UUFR, INSTR_RRF_UURF,
+ INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
INSTR_RSI_RRP,
- INSTR_RSL_R0RD,
+ INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
INSTR_RSY_RDRM,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
@@ -136,6 +147,7 @@ enum {
INSTR_SIL_RDI, INSTR_SIL_RDU,
INSTR_SIY_IRD, INSTR_SIY_URD,
INSTR_SI_URD,
+ INSTR_SMI_U0RDP,
INSTR_SSE_RDRD,
INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
@@ -191,31 +203,42 @@ static const struct operand operands[] =
[U4_12] = { 4, 12, 0 },
[U4_16] = { 4, 16, 0 },
[U4_20] = { 4, 20, 0 },
+ [U4_24] = { 4, 24, 0 },
+ [U4_28] = { 4, 28, 0 },
[U4_32] = { 4, 32, 0 },
+ [U4_36] = { 4, 36, 0 },
[U8_8] = { 8, 8, 0 },
[U8_16] = { 8, 16, 0 },
[U8_24] = { 8, 24, 0 },
[U8_32] = { 8, 32, 0 },
+ [J12_12] = { 12, 12, OPERAND_PCREL },
[I16_16] = { 16, 16, OPERAND_SIGNED },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL },
+ [J16_32] = { 16, 32, OPERAND_PCREL },
[I16_32] = { 16, 32, OPERAND_SIGNED },
+ [I24_24] = { 24, 24, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED },
[U32_16] = { 32, 16, 0 },
[M_16] = { 4, 16, 0 },
+ [M_20] = { 4, 20, 0 },
[RO_28] = { 4, 28, OPERAND_GPR }
};
static const unsigned char formats[][7] = {
[INSTR_E] = { 0xff, 0,0,0,0,0,0 },
+ [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
+ [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
+ [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
[INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
+ [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
[INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
[INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
[INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
- [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
+ [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 },
[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
@@ -245,14 +268,18 @@ static const unsigned char formats[][7] = {
[INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
[INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
+ [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
[INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
+ [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
[INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
+ [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
+ [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
[INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
[INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
@@ -264,12 +291,13 @@ static const unsigned char formats[][7] = {
[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
+ [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
[INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
+ [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -289,9 +317,10 @@ static const unsigned char formats[][7] = {
[INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
[INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
+ [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
- [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
- [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 },
+ [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 },
+ [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 },
[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -304,46 +333,157 @@ static const unsigned char formats[][7] = {
enum {
LONG_INSN_ALGHSIK,
+ LONG_INSN_ALHHHR,
+ LONG_INSN_ALHHLR,
LONG_INSN_ALHSIK,
+ LONG_INSN_ALSIHN,
+ LONG_INSN_CDFBRA,
+ LONG_INSN_CDGBRA,
+ LONG_INSN_CDGTRA,
+ LONG_INSN_CDLFBR,
+ LONG_INSN_CDLFTR,
+ LONG_INSN_CDLGBR,
+ LONG_INSN_CDLGTR,
+ LONG_INSN_CEFBRA,
+ LONG_INSN_CEGBRA,
+ LONG_INSN_CELFBR,
+ LONG_INSN_CELGBR,
+ LONG_INSN_CFDBRA,
+ LONG_INSN_CFEBRA,
+ LONG_INSN_CFXBRA,
+ LONG_INSN_CGDBRA,
+ LONG_INSN_CGDTRA,
+ LONG_INSN_CGEBRA,
+ LONG_INSN_CGXBRA,
+ LONG_INSN_CGXTRA,
+ LONG_INSN_CLFDBR,
+ LONG_INSN_CLFDTR,
+ LONG_INSN_CLFEBR,
LONG_INSN_CLFHSI,
+ LONG_INSN_CLFXBR,
+ LONG_INSN_CLFXTR,
+ LONG_INSN_CLGDBR,
+ LONG_INSN_CLGDTR,
+ LONG_INSN_CLGEBR,
LONG_INSN_CLGFRL,
LONG_INSN_CLGHRL,
LONG_INSN_CLGHSI,
+ LONG_INSN_CLGXBR,
+ LONG_INSN_CLGXTR,
LONG_INSN_CLHHSI,
+ LONG_INSN_CXFBRA,
+ LONG_INSN_CXGBRA,
+ LONG_INSN_CXGTRA,
+ LONG_INSN_CXLFBR,
+ LONG_INSN_CXLFTR,
+ LONG_INSN_CXLGBR,
+ LONG_INSN_CXLGTR,
+ LONG_INSN_FIDBRA,
+ LONG_INSN_FIEBRA,
+ LONG_INSN_FIXBRA,
+ LONG_INSN_LDXBRA,
+ LONG_INSN_LEDBRA,
+ LONG_INSN_LEXBRA,
+ LONG_INSN_LLGFAT,
LONG_INSN_LLGFRL,
LONG_INSN_LLGHRL,
+ LONG_INSN_LLGTAT,
LONG_INSN_POPCNT,
+ LONG_INSN_RIEMIT,
+ LONG_INSN_RINEXT,
+ LONG_INSN_RISBGN,
LONG_INSN_RISBHG,
LONG_INSN_RISBLG,
- LONG_INSN_RINEXT,
- LONG_INSN_RIEMIT,
+ LONG_INSN_SLHHHR,
+ LONG_INSN_SLHHLR,
LONG_INSN_TABORT,
LONG_INSN_TBEGIN,
LONG_INSN_TBEGINC,
+ LONG_INSN_PCISTG,
+ LONG_INSN_MPCIFC,
+ LONG_INSN_STPCIFC,
+ LONG_INSN_PCISTB,
};
static char *long_insn_name[] = {
[LONG_INSN_ALGHSIK] = "alghsik",
+ [LONG_INSN_ALHHHR] = "alhhhr",
+ [LONG_INSN_ALHHLR] = "alhhlr",
[LONG_INSN_ALHSIK] = "alhsik",
+ [LONG_INSN_ALSIHN] = "alsihn",
+ [LONG_INSN_CDFBRA] = "cdfbra",
+ [LONG_INSN_CDGBRA] = "cdgbra",
+ [LONG_INSN_CDGTRA] = "cdgtra",
+ [LONG_INSN_CDLFBR] = "cdlfbr",
+ [LONG_INSN_CDLFTR] = "cdlftr",
+ [LONG_INSN_CDLGBR] = "cdlgbr",
+ [LONG_INSN_CDLGTR] = "cdlgtr",
+ [LONG_INSN_CEFBRA] = "cefbra",
+ [LONG_INSN_CEGBRA] = "cegbra",
+ [LONG_INSN_CELFBR] = "celfbr",
+ [LONG_INSN_CELGBR] = "celgbr",
+ [LONG_INSN_CFDBRA] = "cfdbra",
+ [LONG_INSN_CFEBRA] = "cfebra",
+ [LONG_INSN_CFXBRA] = "cfxbra",
+ [LONG_INSN_CGDBRA] = "cgdbra",
+ [LONG_INSN_CGDTRA] = "cgdtra",
+ [LONG_INSN_CGEBRA] = "cgebra",
+ [LONG_INSN_CGXBRA] = "cgxbra",
+ [LONG_INSN_CGXTRA] = "cgxtra",
+ [LONG_INSN_CLFDBR] = "clfdbr",
+ [LONG_INSN_CLFDTR] = "clfdtr",
+ [LONG_INSN_CLFEBR] = "clfebr",
[LONG_INSN_CLFHSI] = "clfhsi",
+ [LONG_INSN_CLFXBR] = "clfxbr",
+ [LONG_INSN_CLFXTR] = "clfxtr",
+ [LONG_INSN_CLGDBR] = "clgdbr",
+ [LONG_INSN_CLGDTR] = "clgdtr",
+ [LONG_INSN_CLGEBR] = "clgebr",
[LONG_INSN_CLGFRL] = "clgfrl",
[LONG_INSN_CLGHRL] = "clghrl",
[LONG_INSN_CLGHSI] = "clghsi",
+ [LONG_INSN_CLGXBR] = "clgxbr",
+ [LONG_INSN_CLGXTR] = "clgxtr",
[LONG_INSN_CLHHSI] = "clhhsi",
+ [LONG_INSN_CXFBRA] = "cxfbra",
+ [LONG_INSN_CXGBRA] = "cxgbra",
+ [LONG_INSN_CXGTRA] = "cxgtra",
+ [LONG_INSN_CXLFBR] = "cxlfbr",
+ [LONG_INSN_CXLFTR] = "cxlftr",
+ [LONG_INSN_CXLGBR] = "cxlgbr",
+ [LONG_INSN_CXLGTR] = "cxlgtr",
+ [LONG_INSN_FIDBRA] = "fidbra",
+ [LONG_INSN_FIEBRA] = "fiebra",
+ [LONG_INSN_FIXBRA] = "fixbra",
+ [LONG_INSN_LDXBRA] = "ldxbra",
+ [LONG_INSN_LEDBRA] = "ledbra",
+ [LONG_INSN_LEXBRA] = "lexbra",
+ [LONG_INSN_LLGFAT] = "llgfat",
[LONG_INSN_LLGFRL] = "llgfrl",
[LONG_INSN_LLGHRL] = "llghrl",
+ [LONG_INSN_LLGTAT] = "llgtat",
[LONG_INSN_POPCNT] = "popcnt",
+ [LONG_INSN_RIEMIT] = "riemit",
+ [LONG_INSN_RINEXT] = "rinext",
+ [LONG_INSN_RISBGN] = "risbgn",
[LONG_INSN_RISBHG] = "risbhg",
[LONG_INSN_RISBLG] = "risblg",
- [LONG_INSN_RINEXT] = "rinext",
- [LONG_INSN_RIEMIT] = "riemit",
+ [LONG_INSN_SLHHHR] = "slhhhr",
+ [LONG_INSN_SLHHLR] = "slhhlr",
[LONG_INSN_TABORT] = "tabort",
[LONG_INSN_TBEGIN] = "tbegin",
[LONG_INSN_TBEGINC] = "tbeginc",
+ [LONG_INSN_PCISTG] = "pcistg",
+ [LONG_INSN_MPCIFC] = "mpcifc",
+ [LONG_INSN_STPCIFC] = "stpcifc",
+ [LONG_INSN_PCISTB] = "pcistb",
};
static struct insn opcode[] = {
#ifdef CONFIG_64BIT
+ { "bprp", 0xc5, INSTR_MII_UPI },
+ { "bpp", 0xc7, INSTR_SMI_U0RDP },
+ { "trtr", 0xd0, INSTR_SS_L0RDRD },
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
#endif
{ "spm", 0x04, INSTR_RR_R0 },
@@ -378,7 +518,6 @@ static struct insn opcode[] = {
{ "lcdr", 0x23, INSTR_RR_FF },
{ "hdr", 0x24, INSTR_RR_FF },
{ "ldxr", 0x25, INSTR_RR_FF },
- { "lrdr", 0x25, INSTR_RR_FF },
{ "mxr", 0x26, INSTR_RR_FF },
{ "mxdr", 0x27, INSTR_RR_FF },
{ "ldr", 0x28, INSTR_RR_FF },
@@ -395,7 +534,6 @@ static struct insn opcode[] = {
{ "lcer", 0x33, INSTR_RR_FF },
{ "her", 0x34, INSTR_RR_FF },
{ "ledr", 0x35, INSTR_RR_FF },
- { "lrer", 0x35, INSTR_RR_FF },
{ "axr", 0x36, INSTR_RR_FF },
{ "sxr", 0x37, INSTR_RR_FF },
{ "ler", 0x38, INSTR_RR_FF },
@@ -403,7 +541,6 @@ static struct insn opcode[] = {
{ "aer", 0x3a, INSTR_RR_FF },
{ "ser", 0x3b, INSTR_RR_FF },
{ "mder", 0x3c, INSTR_RR_FF },
- { "mer", 0x3c, INSTR_RR_FF },
{ "der", 0x3d, INSTR_RR_FF },
{ "aur", 0x3e, INSTR_RR_FF },
{ "sur", 0x3f, INSTR_RR_FF },
@@ -454,7 +591,6 @@ static struct insn opcode[] = {
{ "ae", 0x7a, INSTR_RX_FRRD },
{ "se", 0x7b, INSTR_RX_FRRD },
{ "mde", 0x7c, INSTR_RX_FRRD },
- { "me", 0x7c, INSTR_RX_FRRD },
{ "de", 0x7d, INSTR_RX_FRRD },
{ "au", 0x7e, INSTR_RX_FRRD },
{ "su", 0x7f, INSTR_RX_FRRD },
@@ -534,9 +670,9 @@ static struct insn opcode[] = {
static struct insn opcode_01[] = {
#ifdef CONFIG_64BIT
- { "sam64", 0x0e, INSTR_E },
- { "pfpo", 0x0a, INSTR_E },
{ "ptff", 0x04, INSTR_E },
+ { "pfpo", 0x0a, INSTR_E },
+ { "sam64", 0x0e, INSTR_E },
#endif
{ "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E },
@@ -605,19 +741,28 @@ static struct insn opcode_aa[] = {
static struct insn opcode_b2[] = {
#ifdef CONFIG_64BIT
- { "sske", 0x2b, INSTR_RRF_M0RR },
{ "stckf", 0x7c, INSTR_S_RD },
- { "cu21", 0xa6, INSTR_RRF_M0RR },
- { "cuutf", 0xa6, INSTR_RRF_M0RR },
- { "cu12", 0xa7, INSTR_RRF_M0RR },
- { "cutfu", 0xa7, INSTR_RRF_M0RR },
+ { "lpp", 0x80, INSTR_S_RD },
+ { "lcctl", 0x84, INSTR_S_RD },
+ { "lpctl", 0x85, INSTR_S_RD },
+ { "qsi", 0x86, INSTR_S_RD },
+ { "lsctl", 0x87, INSTR_S_RD },
+ { "qctri", 0x8e, INSTR_S_RD },
{ "stfle", 0xb0, INSTR_S_RD },
{ "lpswe", 0xb2, INSTR_S_RD },
+ { "srnmb", 0xb8, INSTR_S_RD },
{ "srnmt", 0xb9, INSTR_S_RD },
{ "lfas", 0xbd, INSTR_S_RD },
- { "etndg", 0xec, INSTR_RRE_R0 },
+ { "scctr", 0xe0, INSTR_RRE_RR },
+ { "spctr", 0xe1, INSTR_RRE_RR },
+ { "ecctr", 0xe4, INSTR_RRE_RR },
+ { "epctr", 0xe5, INSTR_RRE_RR },
+ { "ppa", 0xe8, INSTR_RRF_U0RR },
+ { "etnd", 0xec, INSTR_RRE_R0 },
+ { "ecpga", 0xed, INSTR_RRE_RR },
+ { "tend", 0xf8, INSTR_S_00 },
+ { "niai", 0xfa, INSTR_IE_UU },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
- { "tend", 0xf8, INSTR_S_RD },
#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
@@ -635,8 +780,8 @@ static struct insn opcode_b2[] = {
{ "sie", 0x14, INSTR_S_RD },
{ "pc", 0x18, INSTR_S_RD },
{ "sac", 0x19, INSTR_S_RD },
- { "servc", 0x20, INSTR_RRE_RR },
{ "cfc", 0x1a, INSTR_S_RD },
+ { "servc", 0x20, INSTR_RRE_RR },
{ "ipte", 0x21, INSTR_RRE_RR },
{ "ipm", 0x22, INSTR_RRE_R0 },
{ "ivsk", 0x23, INSTR_RRE_RR },
@@ -647,9 +792,9 @@ static struct insn opcode_b2[] = {
{ "pt", 0x28, INSTR_RRE_RR },
{ "iske", 0x29, INSTR_RRE_RR },
{ "rrbe", 0x2a, INSTR_RRE_RR },
- { "sske", 0x2b, INSTR_RRE_RR },
+ { "sske", 0x2b, INSTR_RRF_M0RR },
{ "tb", 0x2c, INSTR_RRE_0R },
- { "dxr", 0x2d, INSTR_RRE_F0 },
+ { "dxr", 0x2d, INSTR_RRE_FF },
{ "pgin", 0x2e, INSTR_RRE_RR },
{ "pgout", 0x2f, INSTR_RRE_RR },
{ "csch", 0x30, INSTR_S_00 },
@@ -667,8 +812,8 @@ static struct insn opcode_b2[] = {
{ "schm", 0x3c, INSTR_S_00 },
{ "bakr", 0x40, INSTR_RRE_RR },
{ "cksm", 0x41, INSTR_RRE_RR },
- { "sqdr", 0x44, INSTR_RRE_F0 },
- { "sqer", 0x45, INSTR_RRE_F0 },
+ { "sqdr", 0x44, INSTR_RRE_FF },
+ { "sqer", 0x45, INSTR_RRE_FF },
{ "stura", 0x46, INSTR_RRE_RR },
{ "msta", 0x47, INSTR_RRE_R0 },
{ "palb", 0x48, INSTR_RRE_00 },
@@ -694,14 +839,14 @@ static struct insn opcode_b2[] = {
{ "rp", 0x77, INSTR_S_RD },
{ "stcke", 0x78, INSTR_S_RD },
{ "sacf", 0x79, INSTR_S_RD },
- { "spp", 0x80, INSTR_S_RD },
{ "stsi", 0x7d, INSTR_S_RD },
+ { "spp", 0x80, INSTR_S_RD },
{ "srnm", 0x99, INSTR_S_RD },
{ "stfpc", 0x9c, INSTR_S_RD },
{ "lfpc", 0x9d, INSTR_S_RD },
{ "tre", 0xa5, INSTR_RRE_RR },
- { "cuutf", 0xa6, INSTR_RRE_RR },
- { "cutfu", 0xa7, INSTR_RRE_RR },
+ { "cuutf", 0xa6, INSTR_RRF_M0RR },
+ { "cutfu", 0xa7, INSTR_RRF_M0RR },
{ "stfl", 0xb1, INSTR_S_RD },
{ "trap4", 0xff, INSTR_S_RD },
{ "", 0, INSTR_INVALID }
@@ -715,72 +860,87 @@ static struct insn opcode_b3[] = {
{ "myr", 0x3b, INSTR_RRF_F0FF },
{ "mayhr", 0x3c, INSTR_RRF_F0FF },
{ "myhr", 0x3d, INSTR_RRF_F0FF },
- { "cegbr", 0xa4, INSTR_RRE_RR },
- { "cdgbr", 0xa5, INSTR_RRE_RR },
- { "cxgbr", 0xa6, INSTR_RRE_RR },
- { "cgebr", 0xa8, INSTR_RRF_U0RF },
- { "cgdbr", 0xa9, INSTR_RRF_U0RF },
- { "cgxbr", 0xaa, INSTR_RRF_U0RF },
- { "cfer", 0xb8, INSTR_RRF_U0RF },
- { "cfdr", 0xb9, INSTR_RRF_U0RF },
- { "cfxr", 0xba, INSTR_RRF_U0RF },
- { "cegr", 0xc4, INSTR_RRE_RR },
- { "cdgr", 0xc5, INSTR_RRE_RR },
- { "cxgr", 0xc6, INSTR_RRE_RR },
- { "cger", 0xc8, INSTR_RRF_U0RF },
- { "cgdr", 0xc9, INSTR_RRF_U0RF },
- { "cgxr", 0xca, INSTR_RRF_U0RF },
{ "lpdfr", 0x70, INSTR_RRE_FF },
{ "lndfr", 0x71, INSTR_RRE_FF },
{ "cpsdr", 0x72, INSTR_RRF_F0FF2 },
{ "lcdfr", 0x73, INSTR_RRE_FF },
+ { "sfasr", 0x85, INSTR_RRE_R0 },
+ { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
{ "ldgr", 0xc1, INSTR_RRE_FR },
+ { "cegr", 0xc4, INSTR_RRE_FR },
+ { "cdgr", 0xc5, INSTR_RRE_FR },
+ { "cxgr", 0xc6, INSTR_RRE_FR },
+ { "cger", 0xc8, INSTR_RRF_U0RF },
+ { "cgdr", 0xc9, INSTR_RRF_U0RF },
+ { "cgxr", 0xca, INSTR_RRF_U0RF },
{ "lgdr", 0xcd, INSTR_RRE_RF },
- { "adtr", 0xd2, INSTR_RRR_F0FF },
- { "axtr", 0xda, INSTR_RRR_F0FF },
- { "cdtr", 0xe4, INSTR_RRE_FF },
- { "cxtr", 0xec, INSTR_RRE_FF },
+ { "mdtra", 0xd0, INSTR_RRF_FUFF2 },
+ { "ddtra", 0xd1, INSTR_RRF_FUFF2 },
+ { "adtra", 0xd2, INSTR_RRF_FUFF2 },
+ { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
+ { "ldetr", 0xd4, INSTR_RRF_0UFF },
+ { "ledtr", 0xd5, INSTR_RRF_UUFF },
+ { "ltdtr", 0xd6, INSTR_RRE_FF },
+ { "fidtr", 0xd7, INSTR_RRF_UUFF },
+ { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
+ { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
+ { "axtra", 0xda, INSTR_RRF_FUFF2 },
+ { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
+ { "lxdtr", 0xdc, INSTR_RRF_0UFF },
+ { "ldxtr", 0xdd, INSTR_RRF_UUFF },
+ { "ltxtr", 0xde, INSTR_RRE_FF },
+ { "fixtr", 0xdf, INSTR_RRF_UUFF },
{ "kdtr", 0xe0, INSTR_RRE_FF },
- { "kxtr", 0xe8, INSTR_RRE_FF },
- { "cedtr", 0xf4, INSTR_RRE_FF },
- { "cextr", 0xfc, INSTR_RRE_FF },
- { "cdgtr", 0xf1, INSTR_RRE_FR },
- { "cxgtr", 0xf9, INSTR_RRE_FR },
- { "cdstr", 0xf3, INSTR_RRE_FR },
- { "cxstr", 0xfb, INSTR_RRE_FR },
- { "cdutr", 0xf2, INSTR_RRE_FR },
- { "cxutr", 0xfa, INSTR_RRE_FR },
- { "cgdtr", 0xe1, INSTR_RRF_U0RF },
- { "cgxtr", 0xe9, INSTR_RRF_U0RF },
- { "csdtr", 0xe3, INSTR_RRE_RF },
- { "csxtr", 0xeb, INSTR_RRE_RF },
+ { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
{ "cudtr", 0xe2, INSTR_RRE_RF },
- { "cuxtr", 0xea, INSTR_RRE_RF },
- { "ddtr", 0xd1, INSTR_RRR_F0FF },
- { "dxtr", 0xd9, INSTR_RRR_F0FF },
+ { "csdtr", 0xe3, INSTR_RRE_RF },
+ { "cdtr", 0xe4, INSTR_RRE_FF },
{ "eedtr", 0xe5, INSTR_RRE_RF },
- { "eextr", 0xed, INSTR_RRE_RF },
{ "esdtr", 0xe7, INSTR_RRE_RF },
+ { "kxtr", 0xe8, INSTR_RRE_FF },
+ { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR },
+ { "cuxtr", 0xea, INSTR_RRE_RF },
+ { "csxtr", 0xeb, INSTR_RRE_RF },
+ { "cxtr", 0xec, INSTR_RRE_FF },
+ { "eextr", 0xed, INSTR_RRE_RF },
{ "esxtr", 0xef, INSTR_RRE_RF },
- { "iedtr", 0xf6, INSTR_RRF_F0FR },
- { "iextr", 0xfe, INSTR_RRF_F0FR },
- { "ltdtr", 0xd6, INSTR_RRE_FF },
- { "ltxtr", 0xde, INSTR_RRE_FF },
- { "fidtr", 0xd7, INSTR_RRF_UUFF },
- { "fixtr", 0xdf, INSTR_RRF_UUFF },
- { "ldetr", 0xd4, INSTR_RRF_0UFF },
- { "lxdtr", 0xdc, INSTR_RRF_0UFF },
- { "ledtr", 0xd5, INSTR_RRF_UUFF },
- { "ldxtr", 0xdd, INSTR_RRF_UUFF },
- { "mdtr", 0xd0, INSTR_RRR_F0FF },
- { "mxtr", 0xd8, INSTR_RRR_F0FF },
+ { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
+ { "cdutr", 0xf2, INSTR_RRE_FR },
+ { "cdstr", 0xf3, INSTR_RRE_FR },
+ { "cedtr", 0xf4, INSTR_RRE_FF },
{ "qadtr", 0xf5, INSTR_RRF_FUFF },
- { "qaxtr", 0xfd, INSTR_RRF_FUFF },
+ { "iedtr", 0xf6, INSTR_RRF_F0FR },
{ "rrdtr", 0xf7, INSTR_RRF_FFRU },
+ { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
+ { "cxutr", 0xfa, INSTR_RRE_FR },
+ { "cxstr", 0xfb, INSTR_RRE_FR },
+ { "cextr", 0xfc, INSTR_RRE_FF },
+ { "qaxtr", 0xfd, INSTR_RRF_FUFF },
+ { "iextr", 0xfe, INSTR_RRF_F0FR },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
- { "sfasr", 0x85, INSTR_RRE_R0 },
- { "sdtr", 0xd3, INSTR_RRR_F0FF },
- { "sxtr", 0xdb, INSTR_RRR_F0FF },
#endif
{ "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF },
@@ -827,10 +987,10 @@ static struct insn opcode_b3[] = {
{ "lnxbr", 0x41, INSTR_RRE_FF },
{ "ltxbr", 0x42, INSTR_RRE_FF },
{ "lcxbr", 0x43, INSTR_RRE_FF },
- { "ledbr", 0x44, INSTR_RRE_FF },
- { "ldxbr", 0x45, INSTR_RRE_FF },
- { "lexbr", 0x46, INSTR_RRE_FF },
- { "fixbr", 0x47, INSTR_RRF_U0FF },
+ { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF },
{ "kxbr", 0x48, INSTR_RRE_FF },
{ "cxbr", 0x49, INSTR_RRE_FF },
{ "axbr", 0x4a, INSTR_RRE_FF },
@@ -840,24 +1000,24 @@ static struct insn opcode_b3[] = {
{ "tbedr", 0x50, INSTR_RRF_U0FF },
{ "tbdr", 0x51, INSTR_RRF_U0FF },
{ "diebr", 0x53, INSTR_RRF_FUFF },
- { "fiebr", 0x57, INSTR_RRF_U0FF },
- { "thder", 0x58, INSTR_RRE_RR },
- { "thdr", 0x59, INSTR_RRE_RR },
+ { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF },
+ { "thder", 0x58, INSTR_RRE_FF },
+ { "thdr", 0x59, INSTR_RRE_FF },
{ "didbr", 0x5b, INSTR_RRF_FUFF },
- { "fidbr", 0x5f, INSTR_RRF_U0FF },
+ { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF },
{ "lpxr", 0x60, INSTR_RRE_FF },
{ "lnxr", 0x61, INSTR_RRE_FF },
{ "ltxr", 0x62, INSTR_RRE_FF },
{ "lcxr", 0x63, INSTR_RRE_FF },
- { "lxr", 0x65, INSTR_RRE_RR },
+ { "lxr", 0x65, INSTR_RRE_FF },
{ "lexr", 0x66, INSTR_RRE_FF },
- { "fixr", 0x67, INSTR_RRF_U0FF },
+ { "fixr", 0x67, INSTR_RRE_FF },
{ "cxr", 0x69, INSTR_RRE_FF },
- { "lzer", 0x74, INSTR_RRE_R0 },
- { "lzdr", 0x75, INSTR_RRE_R0 },
- { "lzxr", 0x76, INSTR_RRE_R0 },
- { "fier", 0x77, INSTR_RRF_U0FF },
- { "fidr", 0x7f, INSTR_RRF_U0FF },
+ { "lzer", 0x74, INSTR_RRE_F0 },
+ { "lzdr", 0x75, INSTR_RRE_F0 },
+ { "lzxr", 0x76, INSTR_RRE_F0 },
+ { "fier", 0x77, INSTR_RRE_FF },
+ { "fidr", 0x7f, INSTR_RRE_FF },
{ "sfpc", 0x84, INSTR_RRE_RR_OPT },
{ "efpc", 0x8c, INSTR_RRE_RR_OPT },
{ "cefbr", 0x94, INSTR_RRE_RF },
@@ -866,9 +1026,12 @@ static struct insn opcode_b3[] = {
{ "cfebr", 0x98, INSTR_RRF_U0RF },
{ "cfdbr", 0x99, INSTR_RRF_U0RF },
{ "cfxbr", 0x9a, INSTR_RRF_U0RF },
- { "cefr", 0xb4, INSTR_RRE_RF },
- { "cdfr", 0xb5, INSTR_RRE_RF },
- { "cxfr", 0xb6, INSTR_RRE_RF },
+ { "cefr", 0xb4, INSTR_RRE_FR },
+ { "cdfr", 0xb5, INSTR_RRE_FR },
+ { "cxfr", 0xb6, INSTR_RRE_FR },
+ { "cfer", 0xb8, INSTR_RRF_U0RF },
+ { "cfdr", 0xb9, INSTR_RRF_U0RF },
+ { "cfxr", 0xba, INSTR_RRF_U0RF },
{ "", 0, INSTR_INVALID }
};
@@ -910,7 +1073,23 @@ static struct insn opcode_b9[] = {
{ "lhr", 0x27, INSTR_RRE_RR },
{ "cgfr", 0x30, INSTR_RRE_RR },
{ "clgfr", 0x31, INSTR_RRE_RR },
+ { "cfdtr", 0x41, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
{ "bctgr", 0x46, INSTR_RRE_RR },
+ { "cfxtr", 0x49, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
+ { "cdftr", 0x51, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
+ { "cxftr", 0x59, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
+ { "cgrt", 0x60, INSTR_RRF_U0RR },
+ { "clgrt", 0x61, INSTR_RRF_U0RR },
+ { "crt", 0x72, INSTR_RRF_U0RR },
+ { "clrt", 0x73, INSTR_RRF_U0RR },
{ "ngr", 0x80, INSTR_RRE_RR },
{ "ogr", 0x81, INSTR_RRE_RR },
{ "xgr", 0x82, INSTR_RRE_RR },
@@ -923,32 +1102,34 @@ static struct insn opcode_b9[] = {
{ "slbgr", 0x89, INSTR_RRE_RR },
{ "cspg", 0x8a, INSTR_RRE_RR },
{ "idte", 0x8e, INSTR_RRF_R0RR },
+ { "crdte", 0x8f, INSTR_RRF_RMRR },
{ "llcr", 0x94, INSTR_RRE_RR },
{ "llhr", 0x95, INSTR_RRE_RR },
{ "esea", 0x9d, INSTR_RRE_R0 },
+ { "ptf", 0xa2, INSTR_RRE_R0 },
{ "lptea", 0xaa, INSTR_RRF_RURR },
+ { "rrbm", 0xae, INSTR_RRE_RR },
+ { "pfmf", 0xaf, INSTR_RRE_RR },
{ "cu14", 0xb0, INSTR_RRF_M0RR },
{ "cu24", 0xb1, INSTR_RRF_M0RR },
- { "cu41", 0xb2, INSTR_RRF_M0RR },
- { "cu42", 0xb3, INSTR_RRF_M0RR },
- { "crt", 0x72, INSTR_RRF_U0RR },
- { "cgrt", 0x60, INSTR_RRF_U0RR },
- { "clrt", 0x73, INSTR_RRF_U0RR },
- { "clgrt", 0x61, INSTR_RRF_U0RR },
- { "ptf", 0xa2, INSTR_RRE_R0 },
- { "pfmf", 0xaf, INSTR_RRE_RR },
- { "trte", 0xbf, INSTR_RRF_M0RR },
+ { "cu41", 0xb2, INSTR_RRE_RR },
+ { "cu42", 0xb3, INSTR_RRE_RR },
{ "trtre", 0xbd, INSTR_RRF_M0RR },
+ { "srstu", 0xbe, INSTR_RRE_RR },
+ { "trte", 0xbf, INSTR_RRF_M0RR },
{ "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
{ "shhhr", 0xc9, INSTR_RRF_R0RR2 },
- { "alhhh", 0xca, INSTR_RRF_R0RR2 },
- { "alhhl", 0xca, INSTR_RRF_R0RR2 },
- { "slhhh", 0xcb, INSTR_RRF_R0RR2 },
- { "chhr ", 0xcd, INSTR_RRE_RR },
+ { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 },
+ { "chhr", 0xcd, INSTR_RRE_RR },
{ "clhhr", 0xcf, INSTR_RRE_RR },
+ { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
+ { "pcilg", 0xd2, INSTR_RRE_RR },
+ { "rpcit", 0xd3, INSTR_RRE_RR },
{ "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
{ "shhlr", 0xd9, INSTR_RRF_R0RR2 },
- { "slhhl", 0xdb, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
{ "chlr", 0xdd, INSTR_RRE_RR },
{ "clhlr", 0xdf, INSTR_RRE_RR },
{ { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
@@ -976,13 +1157,9 @@ static struct insn opcode_b9[] = {
{ "kimd", 0x3e, INSTR_RRE_RR },
{ "klmd", 0x3f, INSTR_RRE_RR },
{ "epsw", 0x8d, INSTR_RRE_RR },
- { "trtt", 0x90, INSTR_RRE_RR },
{ "trtt", 0x90, INSTR_RRF_M0RR },
- { "trto", 0x91, INSTR_RRE_RR },
{ "trto", 0x91, INSTR_RRF_M0RR },
- { "trot", 0x92, INSTR_RRE_RR },
{ "trot", 0x92, INSTR_RRF_M0RR },
- { "troo", 0x93, INSTR_RRE_RR },
{ "troo", 0x93, INSTR_RRF_M0RR },
{ "mlr", 0x96, INSTR_RRE_RR },
{ "dlr", 0x97, INSTR_RRE_RR },
@@ -1013,6 +1190,8 @@ static struct insn opcode_c0[] = {
static struct insn opcode_c2[] = {
#ifdef CONFIG_64BIT
+ { "msgfi", 0x00, INSTR_RIL_RI },
+ { "msfi", 0x01, INSTR_RIL_RI },
{ "slgfi", 0x04, INSTR_RIL_RU },
{ "slfi", 0x05, INSTR_RIL_RU },
{ "agfi", 0x08, INSTR_RIL_RI },
@@ -1023,43 +1202,41 @@ static struct insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU },
- { "msfi", 0x01, INSTR_RIL_RI },
- { "msgfi", 0x00, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c4[] = {
#ifdef CONFIG_64BIT
- { "lrl", 0x0d, INSTR_RIL_RP },
+ { "llhrl", 0x02, INSTR_RIL_RP },
+ { "lghrl", 0x04, INSTR_RIL_RP },
+ { "lhrl", 0x05, INSTR_RIL_RP },
+ { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
+ { "sthrl", 0x07, INSTR_RIL_RP },
{ "lgrl", 0x08, INSTR_RIL_RP },
+ { "stgrl", 0x0b, INSTR_RIL_RP },
{ "lgfrl", 0x0c, INSTR_RIL_RP },
- { "lhrl", 0x05, INSTR_RIL_RP },
- { "lghrl", 0x04, INSTR_RIL_RP },
+ { "lrl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
- { "llhrl", 0x02, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
- { "stgrl", 0x0b, INSTR_RIL_RP },
- { "sthrl", 0x07, INSTR_RIL_RP },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c6[] = {
#ifdef CONFIG_64BIT
- { "crl", 0x0d, INSTR_RIL_RP },
- { "cgrl", 0x08, INSTR_RIL_RP },
- { "cgfrl", 0x0c, INSTR_RIL_RP },
- { "chrl", 0x05, INSTR_RIL_RP },
+ { "exrl", 0x00, INSTR_RIL_RP },
+ { "pfdrl", 0x02, INSTR_RIL_UP },
{ "cghrl", 0x04, INSTR_RIL_RP },
- { "clrl", 0x0f, INSTR_RIL_RP },
+ { "chrl", 0x05, INSTR_RIL_RP },
+ { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
+ { "clhrl", 0x07, INSTR_RIL_RP },
+ { "cgrl", 0x08, INSTR_RIL_RP },
{ "clgrl", 0x0a, INSTR_RIL_RP },
+ { "cgfrl", 0x0c, INSTR_RIL_RP },
+ { "crl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
- { "clhrl", 0x07, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
- { "pfdrl", 0x02, INSTR_RIL_UP },
- { "exrl", 0x00, INSTR_RIL_RP },
+ { "clrl", 0x0f, INSTR_RIL_RP },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1070,7 +1247,7 @@ static struct insn opcode_c8[] = {
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
- { "lpdg ", 0x05, INSTR_SSF_RRDRD2 },
+ { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1080,9 +1257,9 @@ static struct insn opcode_cc[] = {
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
{ "alsih", 0x0a, INSTR_RIL_RI },
- { "alsih", 0x0b, INSTR_RIL_RI },
+ { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
{ "cih", 0x0d, INSTR_RIL_RI },
- { "clih ", 0x0f, INSTR_RIL_RI },
+ { "clih", 0x0f, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1116,11 +1293,15 @@ static struct insn opcode_e3[] = {
{ "cg", 0x20, INSTR_RXY_RRRD },
{ "clg", 0x21, INSTR_RXY_RRRD },
{ "stg", 0x24, INSTR_RXY_RRRD },
+ { "ntstg", 0x25, INSTR_RXY_RRRD },
{ "cvdy", 0x26, INSTR_RXY_RRRD },
{ "cvdg", 0x2e, INSTR_RXY_RRRD },
{ "strvg", 0x2f, INSTR_RXY_RRRD },
{ "cgf", 0x30, INSTR_RXY_RRRD },
{ "clgf", 0x31, INSTR_RXY_RRRD },
+ { "ltgf", 0x32, INSTR_RXY_RRRD },
+ { "cgh", 0x34, INSTR_RXY_RRRD },
+ { "pfd", 0x36, INSTR_RXY_URRD },
{ "strvh", 0x3f, INSTR_RXY_RRRD },
{ "bctg", 0x46, INSTR_RXY_RRRD },
{ "sty", 0x50, INSTR_RXY_RRRD },
@@ -1133,21 +1314,25 @@ static struct insn opcode_e3[] = {
{ "cy", 0x59, INSTR_RXY_RRRD },
{ "ay", 0x5a, INSTR_RXY_RRRD },
{ "sy", 0x5b, INSTR_RXY_RRRD },
+ { "mfy", 0x5c, INSTR_RXY_RRRD },
{ "aly", 0x5e, INSTR_RXY_RRRD },
{ "sly", 0x5f, INSTR_RXY_RRRD },
{ "sthy", 0x70, INSTR_RXY_RRRD },
{ "lay", 0x71, INSTR_RXY_RRRD },
{ "stcy", 0x72, INSTR_RXY_RRRD },
{ "icy", 0x73, INSTR_RXY_RRRD },
+ { "laey", 0x75, INSTR_RXY_RRRD },
{ "lb", 0x76, INSTR_RXY_RRRD },
{ "lgb", 0x77, INSTR_RXY_RRRD },
{ "lhy", 0x78, INSTR_RXY_RRRD },
{ "chy", 0x79, INSTR_RXY_RRRD },
{ "ahy", 0x7a, INSTR_RXY_RRRD },
{ "shy", 0x7b, INSTR_RXY_RRRD },
+ { "mhy", 0x7c, INSTR_RXY_RRRD },
{ "ng", 0x80, INSTR_RXY_RRRD },
{ "og", 0x81, INSTR_RXY_RRRD },
{ "xg", 0x82, INSTR_RXY_RRRD },
+ { "lgat", 0x85, INSTR_RXY_RRRD },
{ "mlg", 0x86, INSTR_RXY_RRRD },
{ "dlg", 0x87, INSTR_RXY_RRRD },
{ "alcg", 0x88, INSTR_RXY_RRRD },
@@ -1158,23 +1343,22 @@ static struct insn opcode_e3[] = {
{ "llgh", 0x91, INSTR_RXY_RRRD },
{ "llc", 0x94, INSTR_RXY_RRRD },
{ "llh", 0x95, INSTR_RXY_RRRD },
- { "cgh", 0x34, INSTR_RXY_RRRD },
- { "laey", 0x75, INSTR_RXY_RRRD },
- { "ltgf", 0x32, INSTR_RXY_RRRD },
- { "mfy", 0x5c, INSTR_RXY_RRRD },
- { "mhy", 0x7c, INSTR_RXY_RRRD },
- { "pfd", 0x36, INSTR_RXY_URRD },
+ { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD },
+ { "lat", 0x9f, INSTR_RXY_RRRD },
{ "lbh", 0xc0, INSTR_RXY_RRRD },
{ "llch", 0xc2, INSTR_RXY_RRRD },
{ "stch", 0xc3, INSTR_RXY_RRRD },
{ "lhh", 0xc4, INSTR_RXY_RRRD },
{ "llhh", 0xc6, INSTR_RXY_RRRD },
{ "sthh", 0xc7, INSTR_RXY_RRRD },
+ { "lfhat", 0xc8, INSTR_RXY_RRRD },
{ "lfh", 0xca, INSTR_RXY_RRRD },
{ "stfh", 0xcb, INSTR_RXY_RRRD },
{ "chf", 0xcd, INSTR_RXY_RRRD },
{ "clhf", 0xcf, INSTR_RXY_RRRD },
- { "ntstg", 0x25, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1189,15 +1373,15 @@ static struct insn opcode_e3[] = {
static struct insn opcode_e5[] = {
#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
+ { "mvhhi", 0x44, INSTR_SIL_RDI },
+ { "mvghi", 0x48, INSTR_SIL_RDI },
+ { "mvhi", 0x4c, INSTR_SIL_RDI },
{ "chhsi", 0x54, INSTR_SIL_RDI },
- { "chsi", 0x5c, INSTR_SIL_RDI },
- { "cghsi", 0x58, INSTR_SIL_RDI },
{ { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
- { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
+ { "cghsi", 0x58, INSTR_SIL_RDI },
{ { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
- { "mvhhi", 0x44, INSTR_SIL_RDI },
- { "mvhi", 0x4c, INSTR_SIL_RDI },
- { "mvghi", 0x48, INSTR_SIL_RDI },
+ { "chsi", 0x5c, INSTR_SIL_RDI },
+ { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
#endif
@@ -1220,9 +1404,11 @@ static struct insn opcode_eb[] = {
{ "rllg", 0x1c, INSTR_RSY_RRRD },
{ "clmh", 0x20, INSTR_RSY_RURD },
{ "clmy", 0x21, INSTR_RSY_RURD },
+ { "clt", 0x23, INSTR_RSY_RURD },
{ "stmg", 0x24, INSTR_RSY_RRRD },
{ "stctg", 0x25, INSTR_RSY_CCRD },
{ "stmh", 0x26, INSTR_RSY_RRRD },
+ { "clgt", 0x2b, INSTR_RSY_RURD },
{ "stcmh", 0x2c, INSTR_RSY_RURD },
{ "stcmy", 0x2d, INSTR_RSY_RURD },
{ "lctlg", 0x2f, INSTR_RSY_CCRD },
@@ -1231,16 +1417,17 @@ static struct insn opcode_eb[] = {
{ "cdsg", 0x3e, INSTR_RSY_RRRD },
{ "bxhg", 0x44, INSTR_RSY_RRRD },
{ "bxleg", 0x45, INSTR_RSY_RRRD },
+ { "ecag", 0x4c, INSTR_RSY_RRRD },
{ "tmy", 0x51, INSTR_SIY_URD },
{ "mviy", 0x52, INSTR_SIY_URD },
{ "niy", 0x54, INSTR_SIY_URD },
{ "cliy", 0x55, INSTR_SIY_URD },
{ "oiy", 0x56, INSTR_SIY_URD },
{ "xiy", 0x57, INSTR_SIY_URD },
- { "lric", 0x60, INSTR_RSY_RDRM },
- { "stric", 0x61, INSTR_RSY_RDRM },
- { "mric", 0x62, INSTR_RSY_RDRM },
- { "icmh", 0x80, INSTR_RSE_RURD },
+ { "asi", 0x6a, INSTR_SIY_IRD },
+ { "alsi", 0x6e, INSTR_SIY_IRD },
+ { "agsi", 0x7a, INSTR_SIY_IRD },
+ { "algsi", 0x7e, INSTR_SIY_IRD },
{ "icmh", 0x80, INSTR_RSY_RURD },
{ "icmy", 0x81, INSTR_RSY_RURD },
{ "clclu", 0x8f, INSTR_RSY_RRRD },
@@ -1249,11 +1436,8 @@ static struct insn opcode_eb[] = {
{ "lmy", 0x98, INSTR_RSY_RRRD },
{ "lamy", 0x9a, INSTR_RSY_AARD },
{ "stamy", 0x9b, INSTR_RSY_AARD },
- { "asi", 0x6a, INSTR_SIY_IRD },
- { "agsi", 0x7a, INSTR_SIY_IRD },
- { "alsi", 0x6e, INSTR_SIY_IRD },
- { "algsi", 0x7e, INSTR_SIY_IRD },
- { "ecag", 0x4c, INSTR_RSY_RRRD },
+ { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD },
+ { "sic", 0xd1, INSTR_RSY_RRRD },
{ "srak", 0xdc, INSTR_RSY_RRRD },
{ "slak", 0xdd, INSTR_RSY_RRRD },
{ "srlk", 0xde, INSTR_RSY_RRRD },
@@ -1272,6 +1456,9 @@ static struct insn opcode_eb[] = {
{ "lax", 0xf7, INSTR_RSY_RRRD },
{ "laa", 0xf8, INSTR_RSY_RRRD },
{ "laal", 0xfa, INSTR_RSY_RRRD },
+ { "lric", 0x60, INSTR_RSY_RDRM },
+ { "stric", 0x61, INSTR_RSY_RDRM },
+ { "mric", 0x62, INSTR_RSY_RDRM },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1283,36 +1470,37 @@ static struct insn opcode_ec[] = {
#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
- { "crb", 0xf6, INSTR_RRS_RRRDU },
- { "cgrb", 0xe4, INSTR_RRS_RRRDU },
- { "crj", 0x76, INSTR_RIE_RRPU },
+ { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
+ { "rnsbg", 0x54, INSTR_RIE_RRUUU },
+ { "risbg", 0x55, INSTR_RIE_RRUUU },
+ { "rosbg", 0x56, INSTR_RIE_RRUUU },
+ { "rxsbg", 0x57, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
{ "cgrj", 0x64, INSTR_RIE_RRPU },
- { "cib", 0xfe, INSTR_RIS_RURDI },
- { "cgib", 0xfc, INSTR_RIS_RURDI },
- { "cij", 0x7e, INSTR_RIE_RUPI },
- { "cgij", 0x7c, INSTR_RIE_RUPI },
- { "cit", 0x72, INSTR_RIE_R0IU },
+ { "clgrj", 0x65, INSTR_RIE_RRPU },
{ "cgit", 0x70, INSTR_RIE_R0IU },
- { "clrb", 0xf7, INSTR_RRS_RRRDU },
- { "clgrb", 0xe5, INSTR_RRS_RRRDU },
+ { "clgit", 0x71, INSTR_RIE_R0UU },
+ { "cit", 0x72, INSTR_RIE_R0IU },
+ { "clfit", 0x73, INSTR_RIE_R0UU },
+ { "crj", 0x76, INSTR_RIE_RRPU },
{ "clrj", 0x77, INSTR_RIE_RRPU },
- { "clgrj", 0x65, INSTR_RIE_RRPU },
- { "clib", 0xff, INSTR_RIS_RURDU },
- { "clgib", 0xfd, INSTR_RIS_RURDU },
- { "clij", 0x7f, INSTR_RIE_RUPU },
+ { "cgij", 0x7c, INSTR_RIE_RUPI },
{ "clgij", 0x7d, INSTR_RIE_RUPU },
- { "clfit", 0x73, INSTR_RIE_R0UU },
- { "clgit", 0x71, INSTR_RIE_R0UU },
- { "rnsbg", 0x54, INSTR_RIE_RRUUU },
- { "rxsbg", 0x57, INSTR_RIE_RRUUU },
- { "rosbg", 0x56, INSTR_RIE_RRUUU },
- { "risbg", 0x55, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
+ { "cij", 0x7e, INSTR_RIE_RUPI },
+ { "clij", 0x7f, INSTR_RIE_RUPU },
{ "ahik", 0xd8, INSTR_RIE_RRI0 },
{ "aghik", 0xd9, INSTR_RIE_RRI0 },
{ { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
{ { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
+ { "cgrb", 0xe4, INSTR_RRS_RRRDU },
+ { "clgrb", 0xe5, INSTR_RRS_RRRDU },
+ { "crb", 0xf6, INSTR_RRS_RRRDU },
+ { "clrb", 0xf7, INSTR_RRS_RRRDU },
+ { "cgib", 0xfc, INSTR_RIS_RURDI },
+ { "clgib", 0xfd, INSTR_RIS_RURDU },
+ { "cib", 0xfe, INSTR_RIS_RURDI },
+ { "clib", 0xff, INSTR_RIS_RURDU },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1325,20 +1513,24 @@ static struct insn opcode_ed[] = {
{ "my", 0x3b, INSTR_RXF_FRRDF },
{ "mayh", 0x3c, INSTR_RXF_FRRDF },
{ "myh", 0x3d, INSTR_RXF_FRRDF },
- { "ley", 0x64, INSTR_RXY_FRRD },
- { "ldy", 0x65, INSTR_RXY_FRRD },
- { "stey", 0x66, INSTR_RXY_FRRD },
- { "stdy", 0x67, INSTR_RXY_FRRD },
{ "sldt", 0x40, INSTR_RXF_FRRDF },
- { "slxt", 0x48, INSTR_RXF_FRRDF },
{ "srdt", 0x41, INSTR_RXF_FRRDF },
+ { "slxt", 0x48, INSTR_RXF_FRRDF },
{ "srxt", 0x49, INSTR_RXF_FRRDF },
{ "tdcet", 0x50, INSTR_RXE_FRRD },
- { "tdcdt", 0x54, INSTR_RXE_FRRD },
- { "tdcxt", 0x58, INSTR_RXE_FRRD },
{ "tdget", 0x51, INSTR_RXE_FRRD },
+ { "tdcdt", 0x54, INSTR_RXE_FRRD },
{ "tdgdt", 0x55, INSTR_RXE_FRRD },
+ { "tdcxt", 0x58, INSTR_RXE_FRRD },
{ "tdgxt", 0x59, INSTR_RXE_FRRD },
+ { "ley", 0x64, INSTR_RXY_FRRD },
+ { "ldy", 0x65, INSTR_RXY_FRRD },
+ { "stey", 0x66, INSTR_RXY_FRRD },
+ { "stdy", 0x67, INSTR_RXY_FRRD },
+ { "czdt", 0xa8, INSTR_RSL_LRDFU },
+ { "czxt", 0xa9, INSTR_RSL_LRDFU },
+ { "cdzt", 0xaa, INSTR_RSL_LRDFU },
+ { "cxzt", 0xab, INSTR_RSL_LRDFU },
#endif
{ "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index aa8f2ba6289b..550228523267 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
+ tm __TI_flags+3(%r12),_TIF_PER_TRAP
+ jo sysc_singlestep
tm __TI_flags+3(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
- tm __TI_flags+3(%r12),_TIF_PER_TRAP
- jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@@ -259,7 +259,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
- ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
@@ -286,7 +285,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
- ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
+ ni __TI_flags+3(%r12),255-_TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_per_trap)
la %r14,BASED(sysc_return)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index d8251b98f17a..2711936fe706 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -6,7 +6,6 @@
#include <asm/ptrace.h>
#include <asm/cputime.h>
-extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
void system_call(void);
@@ -25,6 +24,26 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_asce_exception(struct pt_regs *regs);
+void addressing_exception(struct pt_regs *regs);
+void data_exception(struct pt_regs *regs);
+void default_trap_handler(struct pt_regs *regs);
+void divide_exception(struct pt_regs *regs);
+void execute_exception(struct pt_regs *regs);
+void hfp_divide_exception(struct pt_regs *regs);
+void hfp_overflow_exception(struct pt_regs *regs);
+void hfp_significance_exception(struct pt_regs *regs);
+void hfp_sqrt_exception(struct pt_regs *regs);
+void hfp_underflow_exception(struct pt_regs *regs);
+void illegal_op(struct pt_regs *regs);
+void operand_exception(struct pt_regs *regs);
+void overflow_exception(struct pt_regs *regs);
+void privileged_op(struct pt_regs *regs);
+void space_switch_exception(struct pt_regs *regs);
+void special_op_exception(struct pt_regs *regs);
+void specification_exception(struct pt_regs *regs);
+void transaction_exception(struct pt_regs *regs);
+void translation_exception(struct pt_regs *regs);
+
void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 499e95e90f38..6d34e0c97a39 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#endif
.endm
- .macro HANDLE_SIE_INTERCEPT scratch
+ .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
tmhh %r8,0x0001 # interrupting from user ?
jnz .+42
lgr \scratch,%r9
slg \scratch,BASED(.Lsie_loop)
clg \scratch,BASED(.Lsie_length)
+ .if \pgmcheck
+ # Some program interrupts are suppressing (e.g. protection).
+ # We must also check the instruction after SIE in that case.
+ # do_protection_exception will rewind to rewind_pad
+ jh .+22
+ .else
jhe .+22
+ .endif
lg %r9,BASED(.Lsie_loop)
SPP BASED(.Lhost_id) # set host id
#endif
@@ -262,12 +269,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
+ tm __TI_flags+7(%r12),_TIF_PER_TRAP
+ jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
- tm __TI_flags+7(%r12),_TIF_PER_TRAP
- jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@@ -288,7 +295,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
- ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL
@@ -313,7 +319,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
- ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
+ ni __TI_flags+7(%r12),255-_TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
jg do_per_trap
@@ -375,7 +381,7 @@ ENTRY(pgm_check_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_PGM_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,1
tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
tmhh %r8,0x4000 # PER bit set in old PSW ?
@@ -413,9 +419,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
- sll %r10,3
+ sll %r10,2
je sysc_return
- lg %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r1,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j sysc_return
@@ -451,7 +457,7 @@ ENTRY(io_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_IO_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user?
jz io_skip
@@ -597,7 +603,7 @@ ENTRY(ext_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_EXT_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user ?
jz ext_skip
@@ -645,7 +651,7 @@ ENTRY(mcck_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_MCK_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
@@ -944,6 +950,13 @@ ENTRY(sie64a)
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions in the sie_loop should not cause program interrupts. So
+# let's use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+rewind_pad:
+ nop 0
sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
@@ -983,6 +996,7 @@ sie_fault:
.Lhost_id:
.quad 0
+ EX_TABLE(rewind_pad,sie_fault)
EX_TABLE(sie_loop,sie_fault)
#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 984726cbce16..fd8db63dfc94 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -393,30 +393,35 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
stck __LC_LAST_UPDATE_CLOCK
- spt 5f-.LPG0(%r13)
- mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
+ spt 6f-.LPG0(%r13)
+ mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
- la %r0,0
+ la %r0,1
.insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended
-0: l %r0,__LC_STFL_FAC_LIST
- n %r0,2f+8-.LPG0(%r13)
- cl %r0,2f+8-.LPG0(%r13)
- jne 1f
- l %r0,__LC_STFL_FAC_LIST+4
- n %r0,2f+12-.LPG0(%r13)
- cl %r0,2f+12-.LPG0(%r13)
- je 3f
-1: l %r15,.Lstack-.LPG0(%r13)
+ # verify if all required facilities are supported by the machine
+0: la %r1,__LC_STFL_FAC_LIST
+ la %r2,3f+8-.LPG0(%r13)
+ l %r3,0(%r2)
+1: l %r0,0(%r1)
+ n %r0,4(%r2)
+ cl %r0,4(%r2)
+ jne 2f
+ la %r1,4(%r1)
+ la %r2,4(%r2)
+ ahi %r3,-1
+ jnz 1b
+ j 4f
+2: l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-96
la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13)
basr %r14,%r3
- lpsw 2f-.LPG0(%r13) # machine type not good enough, crash
+ lpsw 3f-.LPG0(%r13) # machine type not good enough, crash
.Lals_string:
.asciz "The Linux kernel requires more recent processor hardware"
.Lsclp_print:
@@ -424,33 +429,42 @@ ENTRY(startup_kdump)
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.align 16
-2: .long 0x000a0000,0x8badcccc
+3: .long 0x000a0000,0x8badcccc
+
+# List of facilities that are required. If not all facilities are present
+# the kernel will crash. Format is number of facility words with bits set,
+# followed by the facility words.
+
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_Z196)
- .long 0xc100efe3, 0xf46c0000
+#if defined(CONFIG_MARCH_ZEC12)
+ .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+#elif defined(CONFIG_MARCH_Z196)
+ .long 2, 0xc100efe3, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
- .long 0xc100efe3, 0xf0680000
+ .long 2, 0xc100efe3, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
- .long 0xc100efc3, 0x00000000
+ .long 1, 0xc100efc3
#elif defined(CONFIG_MARCH_Z990)
- .long 0xc0002000, 0x00000000
+ .long 1, 0xc0002000
#elif defined(CONFIG_MARCH_Z900)
- .long 0xc0000000, 0x00000000
+ .long 1, 0xc0000000
#endif
#else
-#if defined(CONFIG_MARCH_Z196)
- .long 0x8100c880, 0x00000000
+#if defined(CONFIG_MARCH_ZEC12)
+ .long 1, 0x8100c880
+#elif defined(CONFIG_MARCH_Z196)
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z10)
- .long 0x8100c880, 0x00000000
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z9_109)
- .long 0x8100c880, 0x00000000
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z990)
- .long 0x80002000, 0x00000000
+ .long 1, 0x80002000
#elif defined(CONFIG_MARCH_Z900)
- .long 0x80000000, 0x00000000
+ .long 1, 0x80000000
#endif
#endif
-3:
+4:
#endif
#ifdef CONFIG_64BIT
@@ -459,14 +473,14 @@ ENTRY(startup_kdump)
jg startup_continue
#else
/* Continue with 31bit startup code in head31.S */
- l %r13,4f-.LPG0(%r13)
+ l %r13,5f-.LPG0(%r13)
b 0(%r13)
.align 8
-4: .long startup_continue
+5: .long startup_continue
#endif
.align 8
-5: .long 0x7fffffff,0xffffffff
+6: .long 0x7fffffff,0xffffffff
#include "head_kdump.S"
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 6cdc55b26d68..bf24293970ce 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -58,6 +58,8 @@ static const struct irq_class intrclass_names[] = {
[IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+ [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
+ [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
};
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
new file mode 100644
index 000000000000..14bdecb61923
--- /dev/null
+++ b/arch/s390/kernel/pgm_check.S
@@ -0,0 +1,152 @@
+/*
+ * Program check table.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_32BIT
+#define PGM_CHECK_64BIT(handler) .long default_trap_handler
+#else
+#define PGM_CHECK_64BIT(handler) .long handler
+#endif
+
+#define PGM_CHECK(handler) .long handler
+#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
+
+/*
+ * The program check table contains exactly 128 (0x00-0x7f) entries. Each
+ * line defines the 31 and/or 64 bit function to be called corresponding
+ * to the program check interruption code.
+ */
+.section .rodata, "a"
+ENTRY(pgm_check_table)
+PGM_CHECK_DEFAULT /* 00 */
+PGM_CHECK(illegal_op) /* 01 */
+PGM_CHECK(privileged_op) /* 02 */
+PGM_CHECK(execute_exception) /* 03 */
+PGM_CHECK(do_protection_exception) /* 04 */
+PGM_CHECK(addressing_exception) /* 05 */
+PGM_CHECK(specification_exception) /* 06 */
+PGM_CHECK(data_exception) /* 07 */
+PGM_CHECK(overflow_exception) /* 08 */
+PGM_CHECK(divide_exception) /* 09 */
+PGM_CHECK(overflow_exception) /* 0a */
+PGM_CHECK(divide_exception) /* 0b */
+PGM_CHECK(hfp_overflow_exception) /* 0c */
+PGM_CHECK(hfp_underflow_exception) /* 0d */
+PGM_CHECK(hfp_significance_exception) /* 0e */
+PGM_CHECK(hfp_divide_exception) /* 0f */
+PGM_CHECK(do_dat_exception) /* 10 */
+PGM_CHECK(do_dat_exception) /* 11 */
+PGM_CHECK(translation_exception) /* 12 */
+PGM_CHECK(special_op_exception) /* 13 */
+PGM_CHECK_DEFAULT /* 14 */
+PGM_CHECK(operand_exception) /* 15 */
+PGM_CHECK_DEFAULT /* 16 */
+PGM_CHECK_DEFAULT /* 17 */
+PGM_CHECK_64BIT(transaction_exception) /* 18 */
+PGM_CHECK_DEFAULT /* 19 */
+PGM_CHECK_DEFAULT /* 1a */
+PGM_CHECK_DEFAULT /* 1b */
+PGM_CHECK(space_switch_exception) /* 1c */
+PGM_CHECK(hfp_sqrt_exception) /* 1d */
+PGM_CHECK_DEFAULT /* 1e */
+PGM_CHECK_DEFAULT /* 1f */
+PGM_CHECK_DEFAULT /* 20 */
+PGM_CHECK_DEFAULT /* 21 */
+PGM_CHECK_DEFAULT /* 22 */
+PGM_CHECK_DEFAULT /* 23 */
+PGM_CHECK_DEFAULT /* 24 */
+PGM_CHECK_DEFAULT /* 25 */
+PGM_CHECK_DEFAULT /* 26 */
+PGM_CHECK_DEFAULT /* 27 */
+PGM_CHECK_DEFAULT /* 28 */
+PGM_CHECK_DEFAULT /* 29 */
+PGM_CHECK_DEFAULT /* 2a */
+PGM_CHECK_DEFAULT /* 2b */
+PGM_CHECK_DEFAULT /* 2c */
+PGM_CHECK_DEFAULT /* 2d */
+PGM_CHECK_DEFAULT /* 2e */
+PGM_CHECK_DEFAULT /* 2f */
+PGM_CHECK_DEFAULT /* 30 */
+PGM_CHECK_DEFAULT /* 31 */
+PGM_CHECK_DEFAULT /* 32 */
+PGM_CHECK_DEFAULT /* 33 */
+PGM_CHECK_DEFAULT /* 34 */
+PGM_CHECK_DEFAULT /* 35 */
+PGM_CHECK_DEFAULT /* 36 */
+PGM_CHECK_DEFAULT /* 37 */
+PGM_CHECK_64BIT(do_asce_exception) /* 38 */
+PGM_CHECK_64BIT(do_dat_exception) /* 39 */
+PGM_CHECK_64BIT(do_dat_exception) /* 3a */
+PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+PGM_CHECK_DEFAULT /* 3c */
+PGM_CHECK_DEFAULT /* 3d */
+PGM_CHECK_DEFAULT /* 3e */
+PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK_DEFAULT /* 40 */
+PGM_CHECK_DEFAULT /* 41 */
+PGM_CHECK_DEFAULT /* 42 */
+PGM_CHECK_DEFAULT /* 43 */
+PGM_CHECK_DEFAULT /* 44 */
+PGM_CHECK_DEFAULT /* 45 */
+PGM_CHECK_DEFAULT /* 46 */
+PGM_CHECK_DEFAULT /* 47 */
+PGM_CHECK_DEFAULT /* 48 */
+PGM_CHECK_DEFAULT /* 49 */
+PGM_CHECK_DEFAULT /* 4a */
+PGM_CHECK_DEFAULT /* 4b */
+PGM_CHECK_DEFAULT /* 4c */
+PGM_CHECK_DEFAULT /* 4d */
+PGM_CHECK_DEFAULT /* 4e */
+PGM_CHECK_DEFAULT /* 4f */
+PGM_CHECK_DEFAULT /* 50 */
+PGM_CHECK_DEFAULT /* 51 */
+PGM_CHECK_DEFAULT /* 52 */
+PGM_CHECK_DEFAULT /* 53 */
+PGM_CHECK_DEFAULT /* 54 */
+PGM_CHECK_DEFAULT /* 55 */
+PGM_CHECK_DEFAULT /* 56 */
+PGM_CHECK_DEFAULT /* 57 */
+PGM_CHECK_DEFAULT /* 58 */
+PGM_CHECK_DEFAULT /* 59 */
+PGM_CHECK_DEFAULT /* 5a */
+PGM_CHECK_DEFAULT /* 5b */
+PGM_CHECK_DEFAULT /* 5c */
+PGM_CHECK_DEFAULT /* 5d */
+PGM_CHECK_DEFAULT /* 5e */
+PGM_CHECK_DEFAULT /* 5f */
+PGM_CHECK_DEFAULT /* 60 */
+PGM_CHECK_DEFAULT /* 61 */
+PGM_CHECK_DEFAULT /* 62 */
+PGM_CHECK_DEFAULT /* 63 */
+PGM_CHECK_DEFAULT /* 64 */
+PGM_CHECK_DEFAULT /* 65 */
+PGM_CHECK_DEFAULT /* 66 */
+PGM_CHECK_DEFAULT /* 67 */
+PGM_CHECK_DEFAULT /* 68 */
+PGM_CHECK_DEFAULT /* 69 */
+PGM_CHECK_DEFAULT /* 6a */
+PGM_CHECK_DEFAULT /* 6b */
+PGM_CHECK_DEFAULT /* 6c */
+PGM_CHECK_DEFAULT /* 6d */
+PGM_CHECK_DEFAULT /* 6e */
+PGM_CHECK_DEFAULT /* 6f */
+PGM_CHECK_DEFAULT /* 70 */
+PGM_CHECK_DEFAULT /* 71 */
+PGM_CHECK_DEFAULT /* 72 */
+PGM_CHECK_DEFAULT /* 73 */
+PGM_CHECK_DEFAULT /* 74 */
+PGM_CHECK_DEFAULT /* 75 */
+PGM_CHECK_DEFAULT /* 76 */
+PGM_CHECK_DEFAULT /* 77 */
+PGM_CHECK_DEFAULT /* 78 */
+PGM_CHECK_DEFAULT /* 79 */
+PGM_CHECK_DEFAULT /* 7a */
+PGM_CHECK_DEFAULT /* 7b */
+PGM_CHECK_DEFAULT /* 7c */
+PGM_CHECK_DEFAULT /* 7d */
+PGM_CHECK_DEFAULT /* 7e */
+PGM_CHECK_DEFAULT /* 7f */
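
pgm_check_table is now a flat array of 128 .long handler addresses indexed by the low seven bits of the program-interruption code, which is why the 64-bit entry code shifts the index by 2 and loads the slot with lgf. A hedged C equivalent of that dispatch; entry.S/entry64.S remain the real implementation, and ordinary C function pointers are used here purely for readability (the real table packs 32-bit entries).

/*
 * Sketch of the program check dispatch: mask the interruption code to
 * 0x00-0x7f (nill %r10,0x007f) and call the table entry.
 */
struct pt_regs;
typedef void pgm_handler_fn(struct pt_regs *);

static void sketch_dispatch_pgm_check(pgm_handler_fn * const table[128],
				      struct pt_regs *regs,
				      unsigned int int_code)
{
	table[int_code & 0x7f](regs);	/* 4-byte slots in the real table */
}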
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b1f2be9aaaad..2568590973ad 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -777,40 +777,6 @@ static void __init reserve_crashkernel(void)
#endif
}
-static void __init init_storage_keys(unsigned long start, unsigned long end)
-{
- unsigned long boundary, function, size;
-
- while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
- if (MACHINE_HAS_EDAT1) {
- /* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
- size = 1UL << 20;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
- page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
- start += PAGE_SIZE;
- }
-}
-
static void __init setup_memory(void)
{
unsigned long bootmap_size;
@@ -889,7 +855,7 @@ static void __init setup_memory(void)
memblock_add_node(PFN_PHYS(start_chunk),
PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
- init_storage_keys(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
+ storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
}
psw_set_key(PAGE_DEFAULT_KEY);
@@ -1040,6 +1006,9 @@ static void __init setup_hwcaps(void)
case 0x2818:
strcpy(elf_platform, "z196");
break;
+ case 0x2827:
+ strcpy(elf_platform, "zEC12");
+ break;
}
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d1259d875074..c3ff70a7b247 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -461,6 +461,8 @@ void do_signal(struct pt_regs *regs)
/* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
+ if (test_thread_flag(TIF_SINGLE_STEP))
+ set_thread_flag(TIF_PER_TRAP);
break;
}
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index dd55f7c20104..f1aba87cceb8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -29,48 +29,38 @@ struct mask_info {
cpumask_t mask;
};
-static int topology_enabled = 1;
+static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
-unsigned char cpu_socket_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
+
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
- cpumask_clear(&mask);
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_copy(&mask, cpumask_of(cpu));
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
return mask;
+ for (; info; info = info->next) {
+ if (cpumask_test_cpu(cpu, &info->mask))
+ return info->mask;
}
- while (info) {
- if (cpumask_test_cpu(cpu, &info->mask)) {
- mask = info->mask;
- break;
- }
- info = info->next;
- }
- if (cpumask_empty(&mask))
- cpumask_copy(&mask, cpumask_of(cpu));
return mask;
}
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
- struct mask_info *core,
- int one_core_per_cpu)
+ struct mask_info *socket,
+ int one_socket_per_cpu)
{
unsigned int cpu;
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
lcpu = smp_find_processor_id(rcpu);
- if (lcpu >= 0) {
- cpumask_set_cpu(lcpu, &book->mask);
- cpu_book_id[lcpu] = book->id;
- cpumask_set_cpu(lcpu, &core->mask);
- cpu_core_id[lcpu] = rcpu;
- if (one_core_per_cpu) {
- cpu_socket_id[lcpu] = rcpu;
- core = core->next;
- } else {
- cpu_socket_id[lcpu] = core->id;
- }
- smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+ if (lcpu < 0)
+ continue;
+ cpumask_set_cpu(lcpu, &book->mask);
+ cpu_topology[lcpu].book_id = book->id;
+ cpumask_set_cpu(lcpu, &socket->mask);
+ cpu_topology[lcpu].core_id = rcpu;
+ if (one_socket_per_cpu) {
+ cpu_topology[lcpu].socket_id = rcpu;
+ socket = socket->next;
+ } else {
+ cpu_topology[lcpu].socket_id = socket->id;
}
+ smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
- return core;
+ return socket;
}
static void clear_masks(void)
{
struct mask_info *info;
- info = &core_info;
+ info = &socket_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
- struct mask_info *core = &core_info;
+ struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 1:
- core = core->next;
- core->id = tle->container.id;
+ socket = socket->next;
+ socket->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, core, 0);
+ add_cpus_to_mask(&tle->cpu, book, socket, 0);
break;
default:
clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
}
}
-static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
- struct mask_info *core = &core_info;
+ struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 0:
- core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+ socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
break;
default:
clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
}
}
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct cpuid cpu_id;
- get_cpu_id(&cpu_id);
spin_lock_irq(&topology_lock);
+ get_cpu_id(&cpu_id);
clear_masks();
switch (cpu_id.machine) {
case 0x2097:
case 0x2098:
- __tl_to_cores_z10(info);
+ __tl_to_masks_z10(info);
break;
default:
- __tl_to_cores_generic(info);
+ __tl_to_masks_generic(info);
}
spin_unlock_irq(&topology_lock);
}
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
return rc;
}
-static void update_cpu_core_map(void)
+static void update_cpu_masks(void)
{
unsigned long flags;
int cpu;
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
- cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
- cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+ cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
+ cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+ if (!MACHINE_HAS_TOPOLOGY) {
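+			/* no topology information available: treat each cpu
+			 * as its own core, socket and book */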
+ cpu_topology[cpu].core_id = cpu;
+ cpu_topology[cpu].socket_id = cpu;
+ cpu_topology[cpu].book_id = cpu;
+ }
}
spin_unlock_irqrestore(&topology_lock, flags);
}
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
- update_cpu_core_map();
+ update_cpu_masks();
topology_update_polarization_simple();
return 0;
}
store_topology(info);
- tl_to_cores(info);
- update_cpu_core_map();
+ tl_to_masks(info);
+ update_cpu_masks();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
- alloc_masks(info, &core_info, 1);
+ alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
}
@@ -454,7 +449,7 @@ static int __init topology_init(void)
}
set_topology_timer();
out:
- update_cpu_core_map();
+ update_cpu_masks();
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3d2b0fa37db0..70ecfc5fe8f0 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -41,8 +41,6 @@
#include <asm/ipl.h>
#include "entry.h"
-void (*pgm_check_table[128])(struct pt_regs *regs);
-
int show_unhandled_signals = 1;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -350,7 +348,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
}
-static void default_trap_handler(struct pt_regs *regs)
+void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV);
@@ -360,9 +358,9 @@ static void default_trap_handler(struct pt_regs *regs)
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
-static void name(struct pt_regs *regs) \
-{ \
- do_trap(regs, signr, sicode, str); \
+void name(struct pt_regs *regs) \
+{ \
+ do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@@ -417,7 +415,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-static void __kprobes illegal_op(struct pt_regs *regs)
+void __kprobes illegal_op(struct pt_regs *regs)
{
siginfo_t info;
__u8 opcode[6];
@@ -536,7 +534,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
-static void data_exception(struct pt_regs *regs)
+void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
int signal = 0;
@@ -611,7 +609,7 @@ static void data_exception(struct pt_regs *regs)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
-static void space_switch_exception(struct pt_regs *regs)
+void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
if (user_mode(regs))
@@ -629,43 +627,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
panic("Corrupt kernel stack, can't continue.");
}
-/* init is done in lowcore.S and head.S */
-
void __init trap_init(void)
{
- int i;
-
- for (i = 0; i < 128; i++)
- pgm_check_table[i] = &default_trap_handler;
- pgm_check_table[1] = &illegal_op;
- pgm_check_table[2] = &privileged_op;
- pgm_check_table[3] = &execute_exception;
- pgm_check_table[4] = &do_protection_exception;
- pgm_check_table[5] = &addressing_exception;
- pgm_check_table[6] = &specification_exception;
- pgm_check_table[7] = &data_exception;
- pgm_check_table[8] = &overflow_exception;
- pgm_check_table[9] = &divide_exception;
- pgm_check_table[0x0A] = &overflow_exception;
- pgm_check_table[0x0B] = &divide_exception;
- pgm_check_table[0x0C] = &hfp_overflow_exception;
- pgm_check_table[0x0D] = &hfp_underflow_exception;
- pgm_check_table[0x0E] = &hfp_significance_exception;
- pgm_check_table[0x0F] = &hfp_divide_exception;
- pgm_check_table[0x10] = &do_dat_exception;
- pgm_check_table[0x11] = &do_dat_exception;
- pgm_check_table[0x12] = &translation_exception;
- pgm_check_table[0x13] = &special_op_exception;
-#ifdef CONFIG_64BIT
- pgm_check_table[0x18] = &transaction_exception;
- pgm_check_table[0x38] = &do_asce_exception;
- pgm_check_table[0x39] = &do_dat_exception;
- pgm_check_table[0x3A] = &do_dat_exception;
- pgm_check_table[0x3B] = &do_dat_exception;
-#endif /* CONFIG_64BIT */
- pgm_check_table[0x15] = &operand_exception;
- pgm_check_table[0x1C] = &space_switch_exception;
- pgm_check_table[0x1D] = &hfp_sqrt_exception;
- /* Enable machine checks early. */
local_mcck_enable();
}
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 1bea6d1f55ab..640bea12303c 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,9 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o gup.o extable.o
-obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
-obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y += page-states.o gup.o extable.o pageattr.o
+
+obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cbc6668acb85..04e4892247d2 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -150,6 +150,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
+ unsigned int prot;
pud_t *pud;
int i;
@@ -157,7 +158,11 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
st->current_address = addr;
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
- walk_pmd_level(m, st, pud, addr);
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) & _PAGE_RO;
+ note_page(m, st, prot, 2);
+ } else
+ walk_pmd_level(m, st, pud, addr);
else
note_page(m, st, _PAGE_INVALID, 2);
addr += PUD_SIZE;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 04ad4001a289..42601d6e166f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,15 +49,19 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
-#define VM_FAULT_SIGNAL 0x080000
+#define VM_FAULT_SIGNAL 0x080000
-static unsigned long store_indication;
+static unsigned long store_indication __read_mostly;
-void fault_init(void)
+#ifdef CONFIG_64BIT
+static int __init fault_init(void)
{
- if (test_facility(2) && test_facility(75))
+ if (test_facility(75))
store_indication = 0xc00;
+ return 0;
}
+early_initcall(fault_init);
+#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -273,10 +277,16 @@ static inline int do_exception(struct pt_regs *regs, int access)
unsigned int flags;
int fault;
+ tsk = current;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
+
if (notify_page_fault(regs))
return 0;
- tsk = current;
mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
@@ -372,11 +382,6 @@ retry:
goto retry;
}
}
- /*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -423,6 +428,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
struct vm_area_struct *vma;
unsigned long trans_exc_code;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(current, TIF_PER_TRAP);
+
trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81e596c65dee..ae672f41c464 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -125,7 +125,6 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
- fault_init();
}
void __init mem_init(void)
@@ -159,34 +158,6 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
-
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- __ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_TYPE_EMPTY;
- continue;
- }
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
- }
-}
-#endif
-
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 00be01c4b4f3..29ccee3651f4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -2,11 +2,46 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
+#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/hugetlb.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
+#include <asm/page.h>
+
+void storage_key_init_range(unsigned long start, unsigned long end)
+{
+ unsigned long boundary, function, size;
+
+ while (start < end) {
+ if (MACHINE_HAS_EDAT2) {
+ /* set storage keys for a 2GB frame */
+ function = 0x22000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 31;
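+			/* boundary = first 2GB-aligned address above start */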
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ if (MACHINE_HAS_EDAT1) {
+ /* set storage keys for a 1MB frame */
+ function = 0x21000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 20;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ start += PAGE_SIZE;
+ }
+}
static pte_t *walk_page_table(unsigned long addr)
{
@@ -19,7 +54,7 @@ static pte_t *walk_page_table(unsigned long addr)
if (pgd_none(*pgdp))
return NULL;
pudp = pud_offset(pgdp, addr);
- if (pud_none(*pudp))
+ if (pud_none(*pudp) || pud_large(*pudp))
return NULL;
pmdp = pmd_offset(pudp, addr);
if (pmd_none(*pmdp) || pmd_large(*pmdp))
@@ -70,3 +105,46 @@ int set_memory_x(unsigned long addr, int numpages)
{
return 0;
}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long address;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_TYPE_EMPTY;
+ continue;
+ }
+ *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+ }
+}
+
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
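+	/* lra sets condition code 0 only if a valid translation
+	 * exists for the address */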
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
+ return cc == 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c8188a18af05..ae44d2a34313 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -881,22 +881,6 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
-bool kernel_page_present(struct page *page)
-{
- unsigned long addr;
- int cc;
-
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
-}
-#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 387c7c60b5b8..6ed1426d27c5 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -89,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
int ret = -ENOMEM;
while (address < end) {
+ pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -96,18 +97,24 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
goto out;
pgd_populate(&init_mm, pg_dir, pu_dir);
}
-
pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
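+		/* map an aligned 2GB block with a large region-third entry
+		 * if the machine provides EDAT2 */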
+ if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+ !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+ pte_val(pte) |= _REGION3_ENTRY_LARGE;
+ pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
+ pud_val(*pu_dir) = pte_val(pte);
+ address += PUD_SIZE;
+ continue;
+ }
+#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
}
-
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
-
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
@@ -160,6 +167,11 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
address += PUD_SIZE;
continue;
}
+ if (pud_large(*pu_dir)) {
+ pud_clear(pu_dir);
+ address += PUD_SIZE;
+ continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
address += PMD_SIZE;
@@ -193,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
start_addr = (unsigned long) start;
end_addr = (unsigned long) (start + nr);
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+ for (address = start_addr; address < end_addr;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -212,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+			/* Use 1MB frames for the vmemmap if available. Large
+			 * frames are used even if they are only partially
+			 * filled; otherwise page tables would also be needed,
+			 * since vmemmap_populate gets called for each section
+			 * separately. */
+ if (MACHINE_HAS_EDAT1) {
+ void *new_page;
+
+ new_page = vmemmap_alloc_block(PMD_SIZE, node);
+ if (!new_page)
+ goto out;
+ pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = pte_val(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -228,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
+ address += PAGE_SIZE;
}
memset(start, 0, nr * sizeof(struct page));
ret = 0;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9b355b406afa..bb284419b0fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -341,6 +341,27 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
+ case BPF_S_ALU_MOD_X: /* A %= X */
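+		/* X == 0 branches to the ret0 exit; dr leaves the
+		 * remainder in %r4 and the quotient in %r5 */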
+ jit->seen |= SEEN_XREG | SEEN_RET0;
+ /* ltr %r12,%r12 */
+ EMIT2(0x12cc);
+ /* jz <ret0> */
+ EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dr %r4,%r12 */
+ EMIT2(0x1d4c);
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
+ case BPF_S_ALU_MOD_K: /* A %= K */
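+		/* divide by the constant K taken from the literal pool
+		 * at %r13; the remainder again ends up in %r4 */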
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* d %r4,<d(K)>(%r13) */
+ EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
case BPF_S_ALU_AND_X: /* A &= X */
jit->seen |= SEEN_XREG;
/* nr %r5,%r12 */
@@ -368,10 +389,17 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x5650d000, EMIT_CONST(K));
break;
case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+ case BPF_S_ALU_XOR_X:
jit->seen |= SEEN_XREG;
/* xr %r5,%r12 */
EMIT2(0x175c);
break;
+ case BPF_S_ALU_XOR_K: /* A ^= K */
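+		/* XOR with zero is a no-op, emit nothing for K == 0 */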
+ if (!K)
+ break;
+ /* x %r5,<d(K)>(%r13) */
+ EMIT4_DISP(0x5750d000, EMIT_CONST(K));
+ break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
jit->seen |= SEEN_XREG;
/* sll %r5,0(%r12) */
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
new file mode 100644
index 000000000000..ab0827b6bc4b
--- /dev/null
+++ b/arch/s390/pci/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the s390 PCI subsystem.
+#
+
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
+ pci_sysfs.o pci_event.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
new file mode 100644
index 000000000000..7ed38e5e3028
--- /dev/null
+++ b/arch/s390/pci/pci.c
@@ -0,0 +1,1103 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * The System z PCI code is a rewrite from a prototype by
+ * the following people (Kudoz!):
+ * Alexander Schmidt
+ * Christoph Raisch
+ * Hannes Hering
+ * Hoang-Nam Nguyen
+ * Jan-Bernd Themann
+ * Stefan Roscher
+ * Thomas Klein
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+#include <asm/facility.h>
+#include <asm/pci_insn.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#define DEBUG /* enable pr_debug */
+
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+
+#define ZPCI_NR_DMA_SPACES 1
+#define ZPCI_MSI_VEC_BITS 6
+#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
+
+/* list of all detected zpci devices */
+LIST_HEAD(zpci_list);
+EXPORT_SYMBOL_GPL(zpci_list);
+DEFINE_MUTEX(zpci_list_lock);
+EXPORT_SYMBOL_GPL(zpci_list_lock);
+
+struct pci_hp_callback_ops hotplug_ops;
+EXPORT_SYMBOL_GPL(hotplug_ops);
+
+static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+static DEFINE_SPINLOCK(zpci_domain_lock);
+
+struct callback {
+ irq_handler_t handler;
+ void *data;
+};
+
+struct zdev_irq_map {
+ unsigned long aibv; /* AI bit vector */
+ int msi_vecs; /* consecutive MSI-vectors used */
+ int __unused;
+ struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
+ spinlock_t lock; /* protect callbacks against de-reg */
+};
+
+struct intr_bucket {
+	/* bitmap of adapters, one bit per device; corresponds to one irq nr */
+ unsigned long *alloc;
+ /* AI summary bit, global page for all devices */
+ unsigned long *aisb;
+ /* pointer to aibv and callback data in zdev */
+ struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
+ /* protects the whole bucket struct */
+ spinlock_t lock;
+};
+
+static struct intr_bucket *bucket;
+
+/* Adapter local summary indicator */
+static u8 *zpci_irq_si;
+
+static atomic_t irq_retries = ATOMIC_INIT(0);
+
+/* I/O Map */
+static DEFINE_SPINLOCK(zpci_iomap_lock);
+static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+struct zpci_iomap_entry *zpci_iomap_start;
+EXPORT_SYMBOL_GPL(zpci_iomap_start);
+
+/* highest irq summary bit */
+static int __read_mostly aisb_max;
+
+static struct kmem_cache *zdev_irq_cache;
+
+static inline int irq_to_msi_nr(unsigned int irq)
+{
+ return irq & ZPCI_MSI_MASK;
+}
+
+static inline int irq_to_dev_nr(unsigned int irq)
+{
+ return irq >> ZPCI_MSI_VEC_BITS;
+}
+
+static inline struct zdev_irq_map *get_imap(unsigned int irq)
+{
+ return bucket->imap[irq_to_dev_nr(irq)];
+}
+
+struct zpci_dev *get_zdev(struct pci_dev *pdev)
+{
+ return (struct zpci_dev *) pdev->sysdata;
+}
+
+struct zpci_dev *get_zdev_by_fid(u32 fid)
+{
+ struct zpci_dev *tmp, *zdev = NULL;
+
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(tmp, &zpci_list, entry) {
+ if (tmp->fid == fid) {
+ zdev = tmp;
+ break;
+ }
+ }
+ mutex_unlock(&zpci_list_lock);
+ return zdev;
+}
+
+bool zpci_fid_present(u32 fid)
+{
+	return get_zdev_by_fid(fid) != NULL;
+}
+
+static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
+{
+ return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
+}
+
+int pci_domain_nr(struct pci_bus *bus)
+{
+ return ((struct zpci_dev *) bus->sysdata)->domain;
+}
+EXPORT_SYMBOL_GPL(pci_domain_nr);
+
+int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+EXPORT_SYMBOL_GPL(pci_proc_domain);
+
+/* Store PCI function information block */
+static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
+{
+ struct zpci_fib *fib;
+ u8 status, cc;
+
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ do {
+ cc = __stpcifc(zdev->fh, 0, fib, &status);
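+		/* condition code 2 indicates a busy condition,
+		 * retry after a short delay */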
+ if (cc == 2) {
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ memset(fib, 0, PAGE_SIZE);
+ }
+ } while (cc == 2);
+
+ if (cc)
+ pr_err_once("%s: cc: %u status: %u\n",
+ __func__, cc, status);
+
+ /* Return PCI function controls */
+ *fc = fib->fc;
+
+ free_page((unsigned long) fib);
+ return (cc) ? -EIO : 0;
+}
+
+/* Modify PCI: Register adapter interruptions */
+static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
+ u64 aibv)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
+ struct zpci_fib *fib;
+ int rc;
+
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ fib->isc = PCI_ISC;
+ fib->noi = zdev->irq_map->msi_vecs;
+ fib->sum = 1; /* enable summary notifications */
+ fib->aibv = aibv;
+ fib->aibvo = 0; /* every function has its own page */
+ fib->aisb = (u64) bucket->aisb + aisb / 8;
+ fib->aisbo = aisb & ZPCI_MSI_MASK;
+
+ rc = mpcifc_instr(req, fib);
+ pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
+
+ free_page((unsigned long) fib);
+ return rc;
+}
+
+struct mod_pci_args {
+ u64 base;
+ u64 limit;
+ u64 iota;
+};
+
+static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
+ struct zpci_fib *fib;
+ int rc;
+
+ /* The FIB must be available even if it's not used */
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ fib->pba = args->base;
+ fib->pal = args->limit;
+ fib->iota = args->iota;
+
+ rc = mpcifc_instr(req, fib);
+ free_page((unsigned long) fib);
+ return rc;
+}
+
+/* Modify PCI: Register I/O address translation parameters */
+int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
+ u64 base, u64 limit, u64 iota)
+{
+ struct mod_pci_args args = { base, limit, iota };
+
+ WARN_ON_ONCE(iota & 0x3fff);
+ args.iota |= ZPCI_IOTA_RTTO_FLAG;
+ return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
+}
+
+/* Modify PCI: Unregister I/O address translation parameters */
+int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
+{
+ struct mod_pci_args args = { 0, 0, 0 };
+
+ return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
+}
+
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_unregister_airq(struct zpci_dev *zdev)
+{
+ struct mod_pci_args args = { 0, 0, 0 };
+
+ return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
+}
+
+#define ZPCI_PCIAS_CFGSPC 15
+
+static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data;
+ int rc;
+
+ rc = pcilg_instr(&data, req, offset);
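+	/* config space data is little-endian: shift the len bytes
+	 * into place and byte-swap to the big-endian host order */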
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
+ if (!rc)
+ *val = (u32) data;
+ else
+ *val = 0xffffffff;
+ return rc;
+}
+
+static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data = val;
+ int rc;
+
+ data = cpu_to_le64(data);
+ data = data >> ((8 - len) * 8);
+ rc = pcistg_instr(data, req, offset);
+ return rc;
+}
+
+void synchronize_irq(unsigned int irq)
+{
+ /*
+ * Not needed, the handler is protected by a lock and IRQs that occur
+ * after the handler is deleted are just NOPs.
+ */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+void enable_irq(unsigned int irq)
+{
+ struct msi_desc *msi = irq_get_msi_desc(irq);
+
+ zpci_msi_set_mask_bits(msi, 1, 0);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+ struct msi_desc *msi = irq_get_msi_desc(irq);
+
+ zpci_msi_set_mask_bits(msi, 1, 1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+void disable_irq_nosync(unsigned int irq)
+{
+ disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+ return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
+
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+}
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size,
+ resource_size_t align)
+{
+ return 0;
+}
+
+/* combine single writes by using store-block insn */
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+{
+ zpci_memcpy_toio(to, from, count);
+}
+
+/* Create a virtual mapping cookie for a PCI BAR */
+void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ u64 addr;
+ int idx;
+
+ if ((bar & 7) != bar)
+ return NULL;
+
+ idx = zdev->bars[bar].map_idx;
+ spin_lock(&zpci_iomap_lock);
+ zpci_iomap_start[idx].fh = zdev->fh;
+ zpci_iomap_start[idx].bar = bar;
+ spin_unlock(&zpci_iomap_lock);
+
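+	/* encode the iomap table index into bits 48-63 of the cookie,
+	 * pci_iounmap() extracts it again */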
+ addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
+ return (void __iomem *) addr;
+}
+EXPORT_SYMBOL_GPL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ unsigned int idx;
+
+ idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
+ spin_lock(&zpci_iomap_lock);
+ zpci_iomap_start[idx].fh = 0;
+ zpci_iomap_start[idx].bar = 0;
+ spin_unlock(&zpci_iomap_lock);
+}
+EXPORT_SYMBOL_GPL(pci_iounmap);
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus);
+
+ if (!zdev || devfn != ZPCI_DEVFN)
+ return 0;
+ return zpci_cfg_load(zdev, where, val, size);
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus);
+
+ if (!zdev || devfn != ZPCI_DEVFN)
+ return 0;
+ return zpci_cfg_store(zdev, where, val, size);
+}
+
+static struct pci_ops pci_root_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+/* store the last handled bit to implement fair scheduling of devices */
+static DEFINE_PER_CPU(unsigned long, next_sbit);
+
+static void zpci_irq_handler(void *dont, void *need)
+{
+ unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
+ int rescan = 0, max = aisb_max;
+ struct zdev_irq_map *imap;
+
+ kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+ sbit = start;
+
+scan:
+ /* find summary_bit */
+ for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
+ clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
+ last = sbit;
+
+ /* find vector bit */
+ imap = bucket->imap[sbit];
+ for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
+ kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+ clear_bit(63 - mbit, &imap->aibv);
+
+ spin_lock(&imap->lock);
+ if (imap->cb[mbit].handler)
+ imap->cb[mbit].handler(mbit,
+ imap->cb[mbit].data);
+ spin_unlock(&imap->lock);
+ }
+ }
+
+ if (rescan)
+ goto out;
+
+ /* scan the skipped bits */
+ if (start > 0) {
+ sbit = 0;
+ max = start;
+ start = 0;
+ goto scan;
+ }
+
+ /* enable interrupts again */
+ sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+
+ /* check again to not lose initiative */
+ rmb();
+ max = aisb_max;
+ sbit = find_first_bit_left(bucket->aisb, max);
+ if (sbit != max) {
+ atomic_inc(&irq_retries);
+ rescan++;
+ goto scan;
+ }
+out:
+ /* store next device bit to scan */
+ __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
+}
+
+/* msi_vecs - number of requested interrupts, 0 places the function into the error state */
+static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ unsigned int aisb, msi_nr;
+ struct msi_desc *msi;
+ int rc;
+
+ /* store the number of used MSI vectors */
+ zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
+
+ spin_lock(&bucket->lock);
+ aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
+ /* alloc map exhausted? */
+ if (aisb == PAGE_SIZE) {
+ spin_unlock(&bucket->lock);
+ return -EIO;
+ }
+ set_bit(aisb, bucket->alloc);
+ spin_unlock(&bucket->lock);
+
+ zdev->aisb = aisb;
+ if (aisb + 1 > aisb_max)
+ aisb_max = aisb + 1;
+
+ /* wire up IRQ shortcut pointer */
+ bucket->imap[zdev->aisb] = zdev->irq_map;
+ pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
+
+	/* TODO: irq number 0 won't be found if we return fewer than the
+	 * requested MSIs. Ignore it for now and fix it in common code.
+	 */
+ msi_nr = aisb << ZPCI_MSI_VEC_BITS;
+
+ list_for_each_entry(msi, &pdev->msi_list, list) {
+ rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
+ aisb << ZPCI_MSI_VEC_BITS);
+ if (rc)
+ return rc;
+ msi_nr++;
+ }
+
+ rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
+ if (rc) {
+ clear_bit(aisb, bucket->alloc);
+ dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
+ return rc;
+ }
+ return (zdev->irq_map->msi_vecs == msi_vecs) ?
+ 0 : zdev->irq_map->msi_vecs;
+}
+
+static void zpci_teardown_msi(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ struct msi_desc *msi;
+ int aisb, rc;
+
+ rc = zpci_unregister_airq(zdev);
+ if (rc) {
+ dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+ return;
+ }
+
+ msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
+ aisb = irq_to_dev_nr(msi->irq);
+
+ list_for_each_entry(msi, &pdev->msi_list, list)
+ zpci_teardown_msi_irq(zdev, msi);
+
+ clear_bit(aisb, bucket->alloc);
+ if (aisb + 1 == aisb_max)
+ aisb_max--;
+}
+
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+	pr_debug("%s: requesting %d MSI interrupts...\n", __func__, nvec);
+ if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
+ return -EINVAL;
+ return zpci_setup_msi(pdev, nvec);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ pr_info("%s: on pdev: %p\n", __func__, pdev);
+ zpci_teardown_msi(pdev);
+}
+
+static void zpci_map_resources(struct zpci_dev *zdev)
+{
+ struct pci_dev *pdev = zdev->pdev;
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
+ pdev->resource[i].end = pdev->resource[i].start + len - 1;
+ pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
+ i, pdev->resource[i].start, pdev->resource[i].end);
+ }
+}
+
+static void zpci_unmap_resources(struct pci_dev *pdev)
+{
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pci_iounmap(pdev, (void *) pdev->resource[i].start);
+ }
+}
+
+struct zpci_dev *zpci_alloc_device(void)
+{
+ struct zpci_dev *zdev;
+
+ /* Alloc memory for our private pci device data */
+ zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Alloc aibv & callback space */
+ zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
+ if (!zdev->irq_map)
+ goto error;
+ WARN_ON((u64) zdev->irq_map & 0xff);
+ return zdev;
+
+error:
+ kfree(zdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+void zpci_free_device(struct zpci_dev *zdev)
+{
+ kmem_cache_free(zdev_irq_cache, zdev->irq_map);
+ kfree(zdev);
+}
+
+/* Called on removal of pci_dev, leaves zpci and bus device */
+static void zpci_remove_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zpci_dma_exit_device(zdev);
+ zpci_sysfs_remove_device(&pdev->dev);
+ zpci_unmap_resources(pdev);
+ list_del(&zdev->entry); /* can be called from init */
+ zdev->pdev = NULL;
+}
+
+static void zpci_scan_devices(void)
+{
+ struct zpci_dev *zdev;
+
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry)
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
+ zpci_scan_device(zdev);
+ mutex_unlock(&zpci_list_lock);
+}
+
+/*
+ * Too late for any s390 specific setup: interrupts must already be set
+ * up, which in turn requires the DMA setup, and the PCI scan accesses
+ * the config space, which only works once the function handle is enabled.
+ */
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+ struct resource *res;
+ u16 cmd;
+ int i;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ res = &pdev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ return -EINVAL;
+
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+void pcibios_disable_device(struct pci_dev *pdev)
+{
+ zpci_remove_device(pdev);
+ pdev->sysdata = NULL;
+}
+
+int pcibios_add_platform_entries(struct pci_dev *pdev)
+{
+ return zpci_sysfs_add_device(&pdev->dev);
+}
+
+int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
+{
+ int msi_nr = irq_to_msi_nr(irq);
+ struct zdev_irq_map *imap;
+ struct msi_desc *msi;
+
+ msi = irq_get_msi_desc(irq);
+ if (!msi)
+ return -EIO;
+
+ imap = get_imap(irq);
+ spin_lock_init(&imap->lock);
+
+ pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
+ imap->cb[msi_nr].handler = handler;
+ imap->cb[msi_nr].data = data;
+
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ zpci_msi_set_mask_bits(msi, 1, 0);
+ return 0;
+}
+
+void zpci_free_irq(unsigned int irq)
+{
+ struct zdev_irq_map *imap = get_imap(irq);
+ int msi_nr = irq_to_msi_nr(irq);
+ unsigned long flags;
+
+ pr_debug("%s: for irq: %d\n", __func__, irq);
+
+ spin_lock_irqsave(&imap->lock, flags);
+ imap->cb[msi_nr].handler = NULL;
+ imap->cb[msi_nr].data = NULL;
+ spin_unlock_irqrestore(&imap->lock, flags);
+}
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
+ __func__, irq, handler, irqflags, devname);
+
+ return zpci_request_irq(irq, handler, dev_id);
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ zpci_free_irq(irq);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+static int __init zpci_irq_init(void)
+{
+ int cpu, rc;
+
+ bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
+ if (!bucket)
+ return -ENOMEM;
+
+ bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!bucket->aisb) {
+ rc = -ENOMEM;
+ goto out_aisb;
+ }
+
+ bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!bucket->alloc) {
+ rc = -ENOMEM;
+ goto out_alloc;
+ }
+
+ isc_register(PCI_ISC);
+ zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
+ if (IS_ERR(zpci_irq_si)) {
+ rc = PTR_ERR(zpci_irq_si);
+ zpci_irq_si = NULL;
+ goto out_ai;
+ }
+
+ for_each_online_cpu(cpu)
+ per_cpu(next_sbit, cpu) = 0;
+
+ spin_lock_init(&bucket->lock);
+ /* set summary to 1 to be called every time for the ISC */
+ *zpci_irq_si = 1;
+ sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ return 0;
+
+out_ai:
+ isc_unregister(PCI_ISC);
+ free_page((unsigned long) bucket->alloc);
+out_alloc:
+ free_page((unsigned long) bucket->aisb);
+out_aisb:
+ kfree(bucket);
+ return rc;
+}
+
+static void zpci_irq_exit(void)
+{
+ free_page((unsigned long) bucket->alloc);
+ free_page((unsigned long) bucket->aisb);
+ s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
+ isc_unregister(PCI_ISC);
+ kfree(bucket);
+}
+
+static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
+ unsigned long flags, int domain)
+{
+ struct resource *r;
+ char *name;
+ int rc;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return ERR_PTR(-ENOMEM);
+ r->start = start;
+ r->end = r->start + size - 1;
+ r->flags = flags;
+ r->parent = &iomem_resource;
+ name = kmalloc(18, GFP_KERNEL);
+ if (!name) {
+ kfree(r);
+ return ERR_PTR(-ENOMEM);
+ }
+ sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
+ r->name = name;
+
+ rc = request_resource(&iomem_resource, r);
+ if (rc)
+ pr_debug("request resource %pR failed\n", r);
+ return r;
+}
+
+static int zpci_alloc_iomap(struct zpci_dev *zdev)
+{
+ int entry;
+
+ spin_lock(&zpci_iomap_lock);
+ entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+ if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
+ spin_unlock(&zpci_iomap_lock);
+ return -ENOSPC;
+ }
+ set_bit(entry, zpci_iomap);
+ spin_unlock(&zpci_iomap_lock);
+ return entry;
+}
+
+static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
+{
+ spin_lock(&zpci_iomap_lock);
+ memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
+ clear_bit(entry, zpci_iomap);
+ spin_unlock(&zpci_iomap_lock);
+}
+
+static int zpci_create_device_bus(struct zpci_dev *zdev)
+{
+ struct resource *res;
+ LIST_HEAD(resources);
+ int i;
+
+ /* allocate mapping entry for each used bar */
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ unsigned long addr, size, flags;
+ int entry;
+
+ if (!zdev->bars[i].size)
+ continue;
+ entry = zpci_alloc_iomap(zdev);
+ if (entry < 0)
+ return entry;
+ zdev->bars[i].map_idx = entry;
+
+ /* only MMIO is supported */
+ flags = IORESOURCE_MEM;
+ if (zdev->bars[i].val & 8)
+ flags |= IORESOURCE_PREFETCH;
+ if (zdev->bars[i].val & 4)
+ flags |= IORESOURCE_MEM_64;
+
+ addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
+
+ size = 1UL << zdev->bars[i].size;
+
+ res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
+ if (IS_ERR(res)) {
+ zpci_free_iomap(zdev, entry);
+ return PTR_ERR(res);
+ }
+ pci_add_resource(&resources, res);
+ }
+
+ zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
+ if (!zdev->bus)
+ return -EIO;
+
+ zdev->bus->max_bus_speed = zdev->max_bus_speed;
+ return 0;
+}
+
+static int zpci_alloc_domain(struct zpci_dev *zdev)
+{
+ spin_lock(&zpci_domain_lock);
+ zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
+ if (zdev->domain == ZPCI_NR_DEVICES) {
+ spin_unlock(&zpci_domain_lock);
+ return -ENOSPC;
+ }
+ set_bit(zdev->domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return 0;
+}
+
+static void zpci_free_domain(struct zpci_dev *zdev)
+{
+ spin_lock(&zpci_domain_lock);
+ clear_bit(zdev->domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+}
+
+int zpci_enable_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+ if (rc)
+ goto out;
+ pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
+
+ rc = zpci_dma_init_device(zdev);
+ if (rc)
+ goto out_dma;
+ return 0;
+
+out_dma:
+ clp_disable_fh(zdev);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(zpci_enable_device);
+
+int zpci_create_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ rc = zpci_alloc_domain(zdev);
+ if (rc)
+ goto out;
+
+ rc = zpci_create_device_bus(zdev);
+ if (rc)
+ goto out_bus;
+
+ mutex_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ if (hotplug_ops.create_slot)
+ hotplug_ops.create_slot(zdev);
+ mutex_unlock(&zpci_list_lock);
+
+ if (zdev->state == ZPCI_FN_STATE_STANDBY)
+ return 0;
+
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_start;
+ return 0;
+
+out_start:
+ mutex_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ if (hotplug_ops.remove_slot)
+ hotplug_ops.remove_slot(zdev);
+ mutex_unlock(&zpci_list_lock);
+out_bus:
+ zpci_free_domain(zdev);
+out:
+ return rc;
+}
+
+void zpci_stop_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ /*
+ * Note: SCLP disables fh via set-pci-fn so don't
+ * do that here.
+ */
+}
+EXPORT_SYMBOL_GPL(zpci_stop_device);
+
+int zpci_scan_device(struct zpci_dev *zdev)
+{
+ zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
+ if (!zdev->pdev) {
+ pr_err("pci_scan_single_device failed for fid: 0x%x\n",
+ zdev->fid);
+ goto out;
+ }
+
+ zpci_map_resources(zdev);
+ pci_bus_add_devices(zdev->bus);
+
+ /* now that pdev was added to the bus mark it as used */
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ return 0;
+
+out:
+ zpci_dma_exit_device(zdev);
+ clp_disable_fh(zdev);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(zpci_scan_device);
+
+static inline int barsize(u8 size)
+{
+ return (size) ? (1 << size) >> 10 : 0;
+}
+
+static int zpci_mem_init(void)
+{
+ zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
+ L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
+ if (!zdev_irq_cache)
+ goto error_zdev;
+
+ /* TODO: use realloc */
+ zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
+ GFP_KERNEL);
+ if (!zpci_iomap_start)
+ goto error_iomap;
+ return 0;
+
+error_iomap:
+ kmem_cache_destroy(zdev_irq_cache);
+error_zdev:
+ return -ENOMEM;
+}
+
+static void zpci_mem_exit(void)
+{
+ kfree(zpci_iomap_start);
+ kmem_cache_destroy(zdev_irq_cache);
+}
+
+unsigned int pci_probe = 1;
+EXPORT_SYMBOL_GPL(pci_probe);
+
+char * __init pcibios_setup(char *str)
+{
+ if (!strcmp(str, "off")) {
+ pci_probe = 0;
+ return NULL;
+ }
+ return str;
+}
+
+static int __init pci_base_init(void)
+{
+ int rc;
+
+ if (!pci_probe)
+ return 0;
+
+ if (!test_facility(2) || !test_facility(69)
+ || !test_facility(71) || !test_facility(72))
+ return 0;
+
+ pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
+ test_facility(69), test_facility(70),
+ test_facility(71));
+
+ rc = zpci_mem_init();
+ if (rc)
+ goto out_mem;
+
+ rc = zpci_msihash_init();
+ if (rc)
+ goto out_hash;
+
+ rc = zpci_irq_init();
+ if (rc)
+ goto out_irq;
+
+ rc = zpci_dma_init();
+ if (rc)
+ goto out_dma;
+
+ rc = clp_find_pci_devices();
+ if (rc)
+ goto out_find;
+
+ zpci_scan_devices();
+ return 0;
+
+out_find:
+ zpci_dma_exit();
+out_dma:
+ zpci_irq_exit();
+out_irq:
+ zpci_msihash_exit();
+out_hash:
+ zpci_mem_exit();
+out_mem:
+ return rc;
+}
+subsys_initcall(pci_base_init);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
new file mode 100644
index 000000000000..7f4ce8d874a4
--- /dev/null
+++ b/arch/s390/pci/pci_clp.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/pci_clp.h>
+
+/*
+ * Call Logical Processor
+ * Retry logic is handled by the caller.
+ */
+static inline u8 clp_instr(void *req)
+{
+ u64 ilpm;
+ u8 cc;
+
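+	/* CLP is issued via a raw .insn encoding since the assembler
+	 * may not provide a mnemonic for it */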
+ asm volatile (
+ " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [ilpm] "=d" (ilpm)
+ : [req] "a" (req)
+ : "cc", "memory");
+ return cc;
+}
+
+static void *clp_alloc_block(void)
+{
+ struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
+ return (page) ? page_address(page) : NULL;
+}
+
+static void clp_free_block(void *ptr)
+{
+ free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
+}
+
+static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci_grp *response)
+{
+ zdev->tlb_refresh = response->refresh;
+ zdev->dma_mask = response->dasm;
+ zdev->msi_addr = response->msia;
+
+ pr_debug("Supported number of MSI vectors: %u\n", response->noi);
+ switch (response->version) {
+ case 1:
+ zdev->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
+{
+ struct clp_req_rsp_query_pci_grp *rrb;
+ int rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.pfgid = pfgid;
+
+ rc = clp_instr(rrb);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+ clp_store_query_pci_fngrp(zdev, &rrb->response);
+ else {
+ pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+static int clp_store_query_pci_fn(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci *response)
+{
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ zdev->bars[i].val = le32_to_cpu(response->bar[i]);
+ zdev->bars[i].size = response->bar_size[i];
+ }
+ zdev->start_dma = response->sdma;
+ zdev->end_dma = response->edma;
+ zdev->pchid = response->pchid;
+ zdev->pfgid = response->pfgid;
+ return 0;
+}
+
+static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
+{
+ struct clp_req_rsp_query_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = fh;
+
+ rc = clp_instr(rrb);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ rc = clp_store_query_pci_fn(zdev, &rrb->response);
+ if (rc)
+ goto out;
+ if (rrb->response.pfgid)
+ rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
+ } else {
+ pr_err("Query PCI failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+out:
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_add_pci_device(u32 fid, u32 fh, int configured)
+{
+ struct zpci_dev *zdev;
+ int rc;
+
+ zdev = zpci_alloc_device();
+ if (IS_ERR(zdev))
+ return PTR_ERR(zdev);
+
+ zdev->fh = fh;
+ zdev->fid = fid;
+
+ /* Query function properties and update zdev */
+ rc = clp_query_pci_fn(zdev, fh);
+ if (rc)
+ goto error;
+
+ if (configured)
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ else
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ rc = zpci_create_device(zdev);
+ if (rc)
+ goto error;
+ return 0;
+
+error:
+ zpci_free_device(zdev);
+ return rc;
+}
+
+/*
+ * Enable/Disable a given PCI function defined by its function handle.
+ */
+static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
+{
+ struct clp_req_rsp_set_pci *rrb;
+ int rc, retries = 1000;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_SET_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = *fh;
+ rrb->request.oc = command;
+ rrb->request.ndas = nr_dma_as;
+
+ rc = clp_instr(rrb);
+ if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
+ retries--;
+ if (retries < 0)
+ break;
+ msleep(1);
+ }
+ } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
+
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+ *fh = rrb->response.fh;
+ else {
+ pr_err("Set PCI FN failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+{
+ u32 fh = zdev->fh;
+ int rc;
+
+ rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+ if (!rc)
+ /* Success -> store enabled handle in zdev */
+ zdev->fh = fh;
+ return rc;
+}
+
+int clp_disable_fh(struct zpci_dev *zdev)
+{
+ u32 fh = zdev->fh;
+ int rc;
+
+ if (!zdev_enabled(zdev))
+ return 0;
+
+ dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
+ rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
+ if (!rc)
+ /* Success -> store disabled handle in zdev */
+ zdev->fh = fh;
+ else
+ dev_err(&zdev->pdev->dev,
+ "Failed to disable fn handle: 0x%x\n", fh);
+ return rc;
+}
+
+static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
+{
+ int present, rc;
+
+ if (!entry->vendor_id)
+ return;
+
+ /* TODO: be a little bit more scalable */
+ present = zpci_fid_present(entry->fid);
+
+ if (present)
+ pr_debug("%s: device %x already present\n", __func__, entry->fid);
+
+ /* skip already used functions */
+ if (present && entry->config_state)
+ return;
+
+ /* aev 306: function moved to stand-by state */
+ if (present && !entry->config_state) {
+ /*
+		 * The handle is already disabled, which means the iota/irq can no
+		 * longer be freed via the firmware interfaces. The resources have
+		 * to be freed manually (DMA memory, debug, sysfs).
+ */
+ zpci_stop_device(get_zdev_by_fid(entry->fid));
+ return;
+ }
+
+ rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+ if (rc)
+ pr_err("Failed to add fid: 0x%x\n", entry->fid);
+}
+
+int clp_find_pci_devices(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ u64 resume_token = 0;
+ int entries, i, rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_LIST_PCI;
+ /* store as many entries as possible */
+ rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+ rrb->request.resume_token = resume_token;
+
+ /* Get PCI function handle list */
+ rc = clp_instr(rrb);
+ if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+ pr_err("List PCI failed with response: 0x%x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ goto out;
+ }
+
+ WARN_ON_ONCE(rrb->response.entry_size !=
+ sizeof(struct clp_fh_list_entry));
+
+ entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+ rrb->response.entry_size;
+ pr_info("Detected number of PCI functions: %u\n", entries);
+
+ /* Store the returned resume token as input for the next call */
+ resume_token = rrb->response.resume_token;
+
+ for (i = 0; i < entries; i++)
+ clp_check_pcifn_entry(&rrb->response.fh_list[i]);
+ } while (resume_token);
+
+ pr_debug("Maximum number of supported PCI functions: %u\n",
+ rrb->response.max_fn);
+out:
+ clp_free_block(rrb);
+ return rc;
+}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
new file mode 100644
index 000000000000..c64b4b294b0a
--- /dev/null
+++ b/arch/s390/pci/pci_dma.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <asm/pci_dma.h>
+
+static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
+
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static unsigned long *dma_alloc_cpu_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+ *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+ return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+ kmem_cache_free(dma_region_table_cache, table);
+}
+
+static unsigned long *dma_alloc_page_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+ *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+ return table;
+}
+
+static void dma_free_page_table(void *table)
+{
+ kmem_cache_free(dma_page_table_cache, table);
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
+{
+ unsigned long *sto;
+
+ if (reg_entry_isvalid(*entry))
+ sto = get_rt_sto(*entry);
+ else {
+ sto = dma_alloc_cpu_table();
+ if (!sto)
+ return NULL;
+
+ set_rt_sto(entry, sto);
+ validate_rt_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *entry)
+{
+ unsigned long *pto;
+
+ if (reg_entry_isvalid(*entry))
+ pto = get_st_pto(*entry);
+ else {
+ pto = dma_alloc_page_table();
+ if (!pto)
+ return NULL;
+ set_st_pto(entry, pto);
+ validate_st_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return pto;
+}
+
+static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+{
+ unsigned long *sto, *pto;
+ unsigned int rtx, sx, px;
+
+ rtx = calc_rtx(dma_addr);
+ sto = dma_get_seg_table_origin(&rto[rtx]);
+ if (!sto)
+ return NULL;
+
+ sx = calc_sx(dma_addr);
+ pto = dma_get_page_table_origin(&sto[sx]);
+ if (!pto)
+ return NULL;
+
+ px = calc_px(dma_addr);
+ return &pto[px];
+}
+
+static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
+ dma_addr_t dma_addr, int flags)
+{
+ unsigned long *entry;
+
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (flags & ZPCI_PTE_INVALID) {
+ invalidate_pt_entry(entry);
+ return;
+ } else {
+ set_pt_pfaa(entry, page_addr);
+ validate_pt_entry(entry);
+ }
+
+ if (flags & ZPCI_TABLE_PROTECTED)
+ entry_set_protected(entry);
+ else
+ entry_clr_protected(entry);
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+ dma_addr_t start_dma_addr = dma_addr;
+ unsigned long irq_flags;
+ int i, rc = 0;
+
+ if (!nr_pages)
+ return -EINVAL;
+
+ spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
+ if (!zdev->dma_table) {
+ dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+ goto no_refresh;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
+ page_addr += PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ }
+
+ /*
+ * rpcit is not required to establish new translations when previously
+ * invalid translation-table entries are validated, however it is
+ * required when altering previously valid entries.
+ */
+ if (!zdev->tlb_refresh &&
+ ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ /*
+ * TODO: also need to check that the old entry is indeed INVALID
+ * and not only for one page but for the whole range...
+ * -> now we WARN_ON in that case but with lazy unmap that
+ * needs to be redone!
+ */
+ goto no_refresh;
+ rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
+
+no_refresh:
+ spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
+ return rc;
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+ unsigned long *sto = get_rt_sto(entry);
+ int sx;
+
+ for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+ if (reg_entry_isvalid(sto[sx]))
+ dma_free_page_table(get_st_pto(sto[sx]));
+
+ dma_free_cpu_table(sto);
+}
+
+static void dma_cleanup_tables(struct zpci_dev *zdev)
+{
+ unsigned long *table;
+ int rtx;
+
+ if (!zdev || !zdev->dma_table)
+ return;
+
+ table = zdev->dma_table;
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(table[rtx]))
+ dma_free_seg_table(table[rtx]);
+
+ dma_free_cpu_table(table);
+ zdev->dma_table = NULL;
+}
+
+static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
+ int size)
+{
+ unsigned long boundary_size = 0x1000000;
+
+ return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
+ start, size, 0, boundary_size, 0);
+}
+
+static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
+{
+ unsigned long offset, flags;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
+ if (offset == -1)
+ offset = __dma_alloc_iommu(zdev, 0, size);
+
+ if (offset != -1) {
+ zdev->next_bit = offset + size;
+ if (zdev->next_bit >= zdev->iommu_pages)
+ zdev->next_bit = 0;
+ }
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+ return offset;
+}
+
+static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ if (!zdev->iommu_bitmap)
+ goto out;
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ if (offset >= zdev->next_bit)
+ zdev->next_bit = offset + size;
+out:
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+}
+
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_set_mask);
+
+static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+ unsigned long nr_pages, iommu_page_index;
+ unsigned long pa = page_to_phys(page) + offset;
+ int flags = ZPCI_PTE_VALID;
+ dma_addr_t dma_addr;
+
+ WARN_ON_ONCE(offset > PAGE_SIZE);
+
+	/* This rounds up the number of pages based on size and offset */
+ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
+ iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
+ if (iommu_page_index == -1)
+ goto out_err;
+
+ /* Use rounded up size */
+ size = nr_pages * PAGE_SIZE;
+
+ dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
+ if (dma_addr + size > zdev->end_dma) {
+ dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
+ dma_addr, size, zdev->end_dma);
+ goto out_free;
+ }
+
+ if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ if (!dma_update_trans(zdev, pa, dma_addr, size, flags))
+ return dma_addr + offset;
+
+out_free:
+ dma_free_iommu(zdev, iommu_page_index, nr_pages);
+out_err:
+ dev_err(dev, "Failed to map addr: %lx\n", pa);
+ return DMA_ERROR_CODE;
+}
+
+static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+ unsigned long iommu_page_index;
+ int npages;
+
+ npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
+ dma_addr = dma_addr & PAGE_MASK;
+ if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+ ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
+ dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+
+ iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
+ dma_free_iommu(zdev, iommu_page_index, npages);
+}
+
+static void *s390_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct page *page;
+ unsigned long pa;
+ dma_addr_t map;
+
+ size = PAGE_ALIGN(size);
+ page = alloc_pages(flag, get_order(size));
+ if (!page)
+ return NULL;
+ pa = page_to_phys(page);
+ memset((void *) pa, 0, size);
+
+ map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
+ size, DMA_BIDIRECTIONAL, NULL);
+ if (dma_mapping_error(dev, map)) {
+ free_pages(pa, get_order(size));
+ return NULL;
+ }
+
+ if (dma_handle)
+ *dma_handle = map;
+ return (void *) pa;
+}
+
+static void s390_dma_free(struct device *dev, size_t size,
+ void *pa, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
+ DMA_BIDIRECTIONAL, NULL);
+ free_pages((unsigned long) pa, get_order(size));
+}
+
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ int mapped_elements = 0;
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nr_elements, i) {
+ struct page *page = sg_page(s);
+ s->dma_address = s390_dma_map_pages(dev, page, s->offset,
+ s->length, dir, NULL);
+ if (!dma_mapping_error(dev, s->dma_address)) {
+ s->dma_length = s->length;
+ mapped_elements++;
+ } else
+ goto unmap;
+ }
+out:
+ return mapped_elements;
+
+unmap:
+ for_each_sg(sg, s, mapped_elements, i) {
+ if (s->dma_address)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, NULL);
+ s->dma_address = 0;
+ s->dma_length = 0;
+ }
+ mapped_elements = 0;
+ goto out;
+}
+
+static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nr_elements, i) {
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+ s->dma_address = 0;
+ s->dma_length = 0;
+ }
+}
+
+int zpci_dma_init_device(struct zpci_dev *zdev)
+{
+ unsigned int bitmap_order;
+ int rc;
+
+ spin_lock_init(&zdev->iommu_bitmap_lock);
+ spin_lock_init(&zdev->dma_table_lock);
+
+ zdev->dma_table = dma_alloc_cpu_table();
+ if (!zdev->dma_table) {
+ rc = -ENOMEM;
+ goto out_clean;
+ }
+
+ zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+ zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+ bitmap_order = get_order(zdev->iommu_pages / 8);
+ pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
+ zdev->iommu_size, zdev->iommu_pages, bitmap_order);
+
+ zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ bitmap_order);
+ if (!zdev->iommu_bitmap) {
+ rc = -ENOMEM;
+ goto out_reg;
+ }
+
+ rc = zpci_register_ioat(zdev,
+ 0,
+ zdev->start_dma + PAGE_OFFSET,
+ zdev->start_dma + zdev->iommu_size - 1,
+ (u64) zdev->dma_table);
+ if (rc)
+ goto out_reg;
+ return 0;
+
+out_reg:
+ dma_free_cpu_table(zdev->dma_table);
+out_clean:
+ return rc;
+}
+
+void zpci_dma_exit_device(struct zpci_dev *zdev)
+{
+ zpci_unregister_ioat(zdev, 0);
+ dma_cleanup_tables(zdev);
+ free_pages((unsigned long) zdev->iommu_bitmap,
+ get_order(zdev->iommu_pages / 8));
+ zdev->iommu_bitmap = NULL;
+ zdev->next_bit = 0;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+ dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+ ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
+ 0, NULL);
+ if (!dma_region_table_cache)
+ return -ENOMEM;
+
+ dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+ ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
+ 0, NULL);
+ if (!dma_page_table_cache) {
+ kmem_cache_destroy(dma_region_table_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int __init zpci_dma_init(void)
+{
+ return dma_alloc_cpu_table_caches();
+}
+
+void zpci_dma_exit(void)
+{
+ kmem_cache_destroy(dma_page_table_cache);
+ kmem_cache_destroy(dma_region_table_cache);
+}
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_debug_do_init);
+
+struct dma_map_ops s390_dma_ops = {
+ .alloc = s390_dma_alloc,
+ .free = s390_dma_free,
+ .map_sg = s390_dma_map_sg,
+ .unmap_sg = s390_dma_unmap_sg,
+ .map_page = s390_dma_map_pages,
+ .unmap_page = s390_dma_unmap_pages,
+ /* if we support direct DMA this must be conditional */
+ .is_phys = 0,
+ /* dma_supported is unconditionally true without a callback */
+};
+EXPORT_SYMBOL_GPL(s390_dma_ops);
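
A minimal usage sketch (illustrative only, not part of the patch): drivers never call these dma_map_ops directly but reach them through the generic DMA API; the device pointer, buffer and length below are placeholders.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_dma_roundtrip(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* dispatched to s390_dma_map_pages() via the .map_page callback */
	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EIO;

	/* ... hand "dma" to the device and wait for completion ... */

	/* dispatched to s390_dma_unmap_pages() via the .unmap_page callback */
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
	return 0;
}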
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
new file mode 100644
index 000000000000..dbed8cd3370c
--- /dev/null
+++ b/arch/s390/pci/pci_event.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/* Content Code Description for PCI Function Error */
+struct zpci_ccdf_err {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 ett : 4; /* expected table type */
+ u32 mvn : 12; /* MSI vector number */
+ u32 dmaas : 8; /* DMA address space */
+ u32 : 6;
+ u32 q : 1; /* event qualifier */
+ u32 rw : 1; /* read/write */
+ u64 faddr; /* failing address */
+ u32 reserved3;
+ u16 reserved4;
+ u16 pec; /* PCI event code */
+} __packed;
+
+/* Content Code Description for PCI Function Availability */
+struct zpci_ccdf_avail {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 reserved5;
+ u16 reserved6;
+ u16 pec; /* PCI event code */
+} __packed;
+
+static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
+}
+
+static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
+ (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
+ (zdev) ? dev_name(&zdev->pdev->dev) : "?",
+ ccdf->fh, ccdf->fid, ccdf->pec);
+ print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
+ 16, 1, ccdf, sizeof(*ccdf), false);
+
+ switch (ccdf->pec) {
+ case 0x0301:
+ zpci_enable_device(zdev);
+ break;
+ case 0x0302:
+ clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ break;
+ case 0x0306:
+ clp_find_pci_devices();
+ break;
+ default:
+ break;
+ }
+}
+
+void zpci_event_error(void *data)
+{
+ struct zpci_ccdf_err *ccdf = data;
+ struct zpci_dev *zdev;
+
+ zpci_event_log_err(ccdf);
+ zdev = get_zdev_by_fid(ccdf->fid);
+ if (!zdev) {
+		pr_err("Error event for unknown fid: %x\n", ccdf->fid);
+ return;
+ }
+}
+
+void zpci_event_availability(void *data)
+{
+ zpci_event_log_avail(data);
+}
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
new file mode 100644
index 000000000000..90fd3482b9e2
--- /dev/null
+++ b/arch/s390/pci/pci_msi.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <asm/hw_irq.h>
+
+/* mapping of irq numbers to msi_desc */
+static struct hlist_head *msi_hash;
+static unsigned int msihash_shift = 6;
+#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+
+static DEFINE_SPINLOCK(msi_map_lock);
+
+struct msi_desc *__irq_get_msi_desc(unsigned int irq)
+{
+ struct hlist_node *entry;
+ struct msi_map *map;
+
+ hlist_for_each_entry_rcu(map, entry,
+ &msi_hash[msi_hashfn(irq)], msi_chain)
+ if (map->irq == irq)
+ return map->msi;
+ return NULL;
+}
+
+int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
+{
+ if (msi->msi_attrib.is_msix) {
+ int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+ msi->masked = readl(msi->mask_base + offset);
+ writel(flag, msi->mask_base + offset);
+ } else {
+ if (msi->msi_attrib.maskbit) {
+ int pos;
+ u32 mask_bits;
+
+ pos = (long) msi->mask_base;
+ pci_read_config_dword(msi->dev, pos, &mask_bits);
+ mask_bits &= ~(mask);
+ mask_bits |= flag & mask;
+ pci_write_config_dword(msi->dev, pos, mask_bits);
+ } else {
+ return 0;
+ }
+ }
+
+ msi->msi_attrib.maskbit = !!flag;
+ return 1;
+}
+
+int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
+ unsigned int nr, int offset)
+{
+ struct msi_map *map;
+ struct msi_msg msg;
+ int rc;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->irq = nr;
+ map->msi = msi;
+ zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+
+ pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
+ __func__, nr, msi_hashfn(nr));
+ hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
+
+ spin_lock(&msi_map_lock);
+ rc = irq_set_msi_desc(nr, msi);
+ if (rc) {
+ spin_unlock(&msi_map_lock);
+ hlist_del_rcu(&map->msi_chain);
+ kfree(map);
+ zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
+ return rc;
+ }
+ spin_unlock(&msi_map_lock);
+
+ msg.data = nr - offset;
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ msg.address_hi = zdev->msi_addr >> 32;
+ write_msi_msg(nr, &msg);
+ return 0;
+}
+
+void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
+{
+ int irq = msi->irq & ZPCI_MSI_MASK;
+ struct msi_map *map;
+
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ zpci_msi_set_mask_bits(msi, 1, 1);
+
+ spin_lock(&msi_map_lock);
+ map = zdev->msi_map[irq];
+ hlist_del_rcu(&map->msi_chain);
+ kfree(map);
+ zdev->msi_map[irq] = NULL;
+ spin_unlock(&msi_map_lock);
+}
+
+/*
+ * The msi hash table has 256 entries, which is good for 4..20
+ * devices (a typical device allocates 10 + number-of-CPUs MSIs).
+ * Maybe make the hash table size adjustable later.
+ */
+int __init zpci_msihash_init(void)
+{
+ unsigned int i;
+
+ msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ if (!msi_hash)
+ return -ENOMEM;
+
+ for (i = 0; i < (1U << msihash_shift); i++)
+ INIT_HLIST_HEAD(&msi_hash[i]);
+ return 0;
+}
+
+void __init zpci_msihash_exit(void)
+{
+ kfree(msi_hash);
+}
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
new file mode 100644
index 000000000000..a42cce69d0a0
--- /dev/null
+++ b/arch/s390/pci/pci_sysfs.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+
+static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%08x\n", zdev->fid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
+
+static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%08x\n", zdev->fh);
+ return strlen(buf);
+}
+static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
+
+static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%04x\n", zdev->pchid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
+
+static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%02x\n", zdev->pfgid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
+
+static struct device_attribute *zpci_dev_attrs[] = {
+ &dev_attr_function_id,
+ &dev_attr_function_handle,
+ &dev_attr_pchid,
+ &dev_attr_pfgid,
+ NULL,
+};
+
+int zpci_sysfs_add_device(struct device *dev)
+{
+ int i, rc = 0;
+
+ for (i = 0; zpci_dev_attrs[i]; i++) {
+ rc = device_create_file(dev, zpci_dev_attrs[i]);
+ if (rc)
+ goto error;
+ }
+ return 0;
+
+error:
+ while (--i >= 0)
+ device_remove_file(dev, zpci_dev_attrs[i]);
+ return rc;
+}
+
+void zpci_sysfs_remove_device(struct device *dev)
+{
+ int i;
+
+ for (i = 0; zpci_dev_attrs[i]; i++)
+ device_remove_file(dev, zpci_dev_attrs[i]);
+}
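
A minimal sketch of reading one of the new attributes from user space (illustrative only; the PCI address in the path is a placeholder):

#include <stdio.h>

int main(void)
{
	char line[32];
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/function_id", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("function_id: %s", line);	/* prints e.g. 0x00000001 */
	fclose(f);
	return 0;
}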
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index f34838839b08..29437eabe095 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,7 +1,7 @@
config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
- depends on PCI
+ depends on (PCI && !S390)
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index b0e46dede1a9..13e9e63a7266 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -151,4 +151,15 @@ config HOTPLUG_PCI_SGI
When in doubt, say N.
+config HOTPLUG_PCI_S390
+ tristate "System z PCI Hotplug Support"
+ depends on S390 && 64BIT
+ help
+ Say Y here if you want to use the System z PCI Hotplug
+ driver for PCI devices. Without this driver it is not
+	  possible to access stand-by PCI functions or to deconfigure
+ PCI functions.
+
+ When in doubt, say Y.
+
endif # HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index c459cd4e39c2..47ec8c80e16d 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
new file mode 100644
index 000000000000..dee68e0698e1
--- /dev/null
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -0,0 +1,252 @@
+/*
+ * PCI Hot Plug Controller Driver for System z
+ *
+ * Copyright 2012 IBM Corp.
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI hpc"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+
+#define SLOT_NAME_SIZE 10
+static LIST_HEAD(s390_hotplug_slot_list);
+
+MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
+MODULE_LICENSE("GPL");
+
+static int zpci_fn_configured(enum zpci_state state)
+{
+ return state == ZPCI_FN_STATE_CONFIGURED ||
+ state == ZPCI_FN_STATE_ONLINE;
+}
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct list_head slot_list;
+ struct hotplug_slot *hotplug_slot;
+ struct zpci_dev *zdev;
+};
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ return -EIO;
+
+ rc = sclp_pci_configure(slot->zdev->fid);
+ if (!rc) {
+ slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+		/* automatically scan the device after it was configured */
+ zpci_enable_device(slot->zdev);
+ zpci_scan_device(slot->zdev);
+ }
+ return rc;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (!zpci_fn_configured(slot->zdev->state))
+ return -EIO;
+
+	/* TODO: we rely on the user to unbind/remove the device; is that
+	 * plausible or do we need to trigger that here?
+ */
+ rc = sclp_pci_deconfigure(slot->zdev->fid);
+ if (!rc) {
+ /* Fixme: better call List-PCI to find the disabled FH
+ for the FID since the FH should be opaque... */
+ slot->zdev->fh &= 0x7fffffff;
+ slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+ }
+ return rc;
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ switch (slot->zdev->state) {
+ case ZPCI_FN_STATE_STANDBY:
+ *value = 0;
+ break;
+ default:
+ *value = 1;
+ break;
+ }
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+	/* if the slot exists it always contains a function */
+ *value = 1;
+ return 0;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+static int init_pci_slot(struct zpci_dev *zdev)
+{
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ struct slot *slot;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot)
+ goto error_hp;
+ hotplug_slot->private = slot;
+
+ slot->hotplug_slot = hotplug_slot;
+ slot->zdev = zdev;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto error_info;
+ hotplug_slot->info = info;
+
+ hotplug_slot->ops = &s390_hotplug_slot_ops;
+ hotplug_slot->release = &release_slot;
+
+ get_power_status(hotplug_slot, &info->power_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
+ rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
+ if (rc) {
+ pr_err("pci_hp_register failed with error %d\n", rc);
+ goto error_reg;
+ }
+ list_add(&slot->slot_list, &s390_hotplug_slot_list);
+ return 0;
+
+error_reg:
+ kfree(info);
+error_info:
+ kfree(hotplug_slot);
+error_hp:
+ kfree(slot);
+error:
+ return -ENOMEM;
+}
+
+static int __init init_pci_slots(void)
+{
+ struct zpci_dev *zdev;
+ int device = 0;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry) {
+ init_pci_slot(zdev);
+ device++;
+ }
+
+ mutex_unlock(&zpci_list_lock);
+ return (device) ? 0 : -ENODEV;
+}
+
+static void exit_pci_slot(struct zpci_dev *zdev)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ if (slot->zdev != zdev)
+ continue;
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static void __exit exit_pci_slots(void)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem.
+	 * Memory will be freed in the release_slot() callback after the
+	 * slot's lifespan is finished.
+ */
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static int __init pci_hotplug_s390_init(void)
+{
+ /*
+ * Do specific initialization stuff for your driver here
+ * like initializing your controller hardware (if any) and
+ * determining the number of slots you have in the system
+ * right now.
+ */
+
+ if (!pci_probe)
+ return -EOPNOTSUPP;
+
+ /* register callbacks for slot handling from arch code */
+ mutex_lock(&zpci_list_lock);
+ hotplug_ops.create_slot = init_pci_slot;
+ hotplug_ops.remove_slot = exit_pci_slot;
+ mutex_unlock(&zpci_list_lock);
+ pr_info("registered hotplug slot callbacks\n");
+ return init_pci_slots();
+}
+
+static void __exit pci_hotplug_s390_exit(void)
+{
+ exit_pci_slots();
+}
+
+module_init(pci_hotplug_s390_init);
+module_exit(pci_hotplug_s390_exit);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a825d78fd0aa..5099636a6e5f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -207,6 +207,8 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
+#ifdef CONFIG_GENERIC_HARDIRQS
+
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
struct msi_desc *desc = irq_data_get_msi(data);
@@ -230,6 +232,8 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@@ -337,8 +341,10 @@ static void free_msi_irqs(struct pci_dev *dev)
if (!entry->irq)
continue;
nvec = 1 << entry->msi_attrib.multiple;
+#ifdef CONFIG_GENERIC_HARDIRQS
for (i = 0; i < nvec; i++)
BUG_ON(irq_has_action(entry->irq + i));
+#endif
}
arch_teardown_msi_irqs(dev);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0595c763dafd..29225e1c159c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -349,6 +349,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
return rc;
}
+static inline
+int _wait_for_empty_queues(struct dasd_device *device)
+{
+ if (device->block)
+ return list_empty(&device->ccw_queue) &&
+ list_empty(&device->block->ccw_queue);
+ else
+ return list_empty(&device->ccw_queue);
+}
+
/*
* Remove device from block device layer. Destroy dirty buffers.
* Forget format information. Check if the target level is basic
@@ -1841,6 +1851,13 @@ static void __dasd_device_check_expire(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
(time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * IO in safe offline processing should not
+ * run out of retries
+ */
+ cqr->retries++;
+ }
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
@@ -3024,11 +3041,11 @@ void dasd_generic_remove(struct ccw_device *cdev)
cdev->handler = NULL;
- dasd_remove_sysfs_files(cdev);
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return;
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return;
@@ -3048,6 +3065,8 @@ void dasd_generic_remove(struct ccw_device *cdev)
*/
if (block)
dasd_free_block(block);
+
+ dasd_remove_sysfs_files(cdev);
}
/*
@@ -3126,16 +3145,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
{
struct dasd_device *device;
struct dasd_block *block;
- int max_count, open_count;
+ int max_count, open_count, rc;
+ rc = 0;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* Already doing offline processing */
- dasd_put_device(device);
- return 0;
- }
+
/*
* We must make sure that this device is currently not in use.
* The open_count is increased for every opener, that includes
@@ -3159,6 +3175,54 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
return -EBUSY;
}
}
+
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+		 * Safe offline is already running. This can only have been
+		 * called by normal offline, so the safe_offline flag needs to
+		 * be removed to run normal offline processing and kill all I/O.
+ */
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* Already doing normal offline processing */
+ dasd_put_device(device);
+ return -EBUSY;
+ } else
+ clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+
+ } else
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ return -EBUSY;
+ }
+
+ /*
+	 * If safe_offline was requested, set the safe_offline_running flag
+	 * and clear safe_offline so that a subsequent call to normal offline
+	 * can override the safe_offline processing.
+ */
+ if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
+ !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+		 * If we want to set the device safe offline, all I/O operations
+		 * should be finished before continuing the offline process, so
+		 * sync the block device first and then wait for our queues to
+		 * become empty.
+ */
+ /* sync blockdev and partitions */
+ rc = fsync_bdev(device->block->bdev);
+ if (rc != 0)
+ goto interrupted;
+
+ /* schedule device tasklet and wait for completion */
+ dasd_schedule_device_bh(device);
+ rc = wait_event_interruptible(shutdown_waitq,
+ _wait_for_empty_queues(device));
+ if (rc != 0)
+ goto interrupted;
+ }
+
+ set_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
@@ -3170,6 +3234,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (block)
dasd_free_block(block);
return 0;
+
+interrupted:
+ /* interrupted by signal */
+ clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+ clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+ dasd_put_device(device);
+ return rc;
}
int dasd_generic_last_path_gone(struct dasd_device *device)
@@ -3489,15 +3561,6 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
-static inline int _wait_for_empty_queues(struct dasd_device *device)
-{
- if (device->block)
- return list_empty(&device->ccw_queue) &&
- list_empty(&device->block->ccw_queue);
- else
- return list_empty(&device->ccw_queue);
-}
-
void dasd_generic_shutdown(struct ccw_device *cdev)
{
struct dasd_device *device;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 20cfd028edcf..c196827c228f 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -952,6 +952,39 @@ static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t
+dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ int rc;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto out;
+ }
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ dasd_put_device(device);
+
+ rc = ccw_device_set_offline(cdev);
+
+out:
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
+
+static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1320,6 +1353,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_expires.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
+ &dev_attr_safe_offline.attr,
NULL,
};
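
A minimal sketch of requesting safe offline processing through the new attribute (illustrative only; the ccw bus id is a placeholder):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/safe_offline", "w");

	if (!f)
		return 1;
	/* any write triggers dasd_safe_offline_store() -> ccw_device_set_offline() */
	fputs("1\n", f);
	fclose(f);
	return 0;
}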
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 108332b44d98..806fe912d6e7 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1026,7 +1026,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
- int rc;
+ int rc, path_err;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
@@ -1037,6 +1037,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
+ path_err = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
if (!(lpm & opm))
@@ -1122,7 +1123,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"the same device, path %02X leads to "
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
- return -EINVAL;
+ path_err = -EINVAL;
+ continue;
}
path_private.conf_data = NULL;
@@ -1142,7 +1144,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
kfree(conf_data);
}
- return 0;
+ return path_err;
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
@@ -3847,7 +3849,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
len = 0;
while (from <= to) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
@@ -3908,23 +3910,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@@ -3937,23 +3939,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@@ -3962,10 +3964,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" Related CP in req: %p\n", req);
dasd_eckd_dump_ccw_range(first, to, page + len);
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
@@ -3975,7 +3977,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page, PRINTK_HEADER "......\n");
}
to = min(fail + 1, last);
len += dasd_eckd_dump_ccw_range(from, to, page + len);
@@ -3984,11 +3986,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
len += dasd_eckd_dump_ccw_range(from, last, page + len);
if (len > 0)
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
}
free_page((unsigned long) page);
}
@@ -4012,10 +4014,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
@@ -4023,7 +4025,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
req ? req->intrc : 0);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.tm.tcw);
@@ -4035,43 +4037,42 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
if (tsb) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->length %d\n", tsb->length);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->flags %x\n", tsb->flags);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->dcw_offset %d\n", tsb->dcw_offset);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
- len += sprintf(page + len,
- KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
@@ -4084,15 +4085,14 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " tsb->tsa.intrg.: not supportet yet \n");
+ len += sprintf(page + len, PRINTK_HEADER
+			" tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len,
- KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -4104,27 +4104,27 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO TSB DATA AVAILABLE\n");
}
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
@@ -4161,9 +4161,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
- rc = dasd_eckd_read_conf(device);
- if (rc)
- goto out_err;
+ dasd_eckd_read_conf(device);
dasd_eckd_get_uid(device, &temp_uid);
/* Generate device unique id */
@@ -4183,9 +4181,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST);
/* RE-Read Configuration Data */
- rc = dasd_eckd_read_conf(device);
- if (rc)
- goto out_err;
+ dasd_eckd_read_conf(device);
/* Read Feature Codes */
dasd_eckd_read_features(device);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index fb7f3bdc6604..eb748507c7fa 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -479,19 +479,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"No memory to dump sense data");
return;
}
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@@ -502,7 +502,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
@@ -512,10 +512,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
- len = sprintf(page, KERN_ERR PRINTK_HEADER
- " Related CP in req: %p\n", req);
+ len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@@ -533,11 +532,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@@ -552,10 +551,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
while (act <= last) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 7ff93eea673d..899e3f5a56e5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -516,6 +516,8 @@ struct dasd_block {
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
+#define DASD_FLAG_SAFE_OFFLINE 10	/* safe offline processing requested */
+#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
void dasd_put_device_wake(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 8252f37d04ed..03c0e0444553 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
+#include <asm/schid.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
@@ -308,11 +309,12 @@ static int dasd_ioctl_information(struct dasd_block *block,
unsigned int cmd, void __user *argp)
{
struct dasd_information2_t *dasd_info;
- unsigned long flags;
- int rc;
+ struct subchannel_id sch_id;
+ struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
- struct ccw_dev_id dev_id;
+ unsigned long flags;
+ int rc;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
@@ -330,9 +332,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
+ ccw_device_get_schid(cdev, &sch_id);
dasd_info->devno = dev_id.devno;
- dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
+ dasd_info->schid = sch_id.sch_no;
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index d7e97ae9ef6d..25bcd4c0ed82 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999,2012
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -103,6 +103,7 @@ extern u64 sclp_facilities;
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
+#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
struct gds_subvector {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 71ea923c322d..c44d13f607bc 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007, 2009
+ * Copyright IBM Corp. 2007,2012
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -19,10 +20,11 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <asm/ctl_reg.h>
#include <asm/chpid.h>
-#include <asm/sclp.h>
#include <asm/setup.h>
-#include <asm/ctl_reg.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
#include "sclp.h"
@@ -400,17 +402,15 @@ out:
static int sclp_assign_storage(u16 rn)
{
- unsigned long long start, address;
+ unsigned long long start;
int rc;
rc = do_assign_storage(0x000d0001, rn);
if (rc)
- goto out;
- start = address = rn2addr(rn);
- for (; address < start + rzm; address += PAGE_SIZE)
- page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
-out:
- return rc;
+ return rc;
+ start = rn2addr(rn);
+ storage_key_init_range(start, start + rzm);
+ return 0;
}
static int sclp_unassign_storage(u16 rn)
@@ -702,6 +702,67 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
+ * PCI I/O adapter configuration related functions.
+ */
+#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
+
+#define SCLP_RECONFIG_PCI_ATYPE			2
+
+struct pci_cfg_sccb {
+ struct sccb_header header;
+ u8 atype; /* adapter type */
+ u8 reserved1;
+ u16 reserved2;
+ u32 aid; /* adapter identifier */
+} __packed;
+
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+ struct pci_cfg_sccb *sccb;
+ int rc;
+
+ if (!SCLP_HAS_PCI_RECONFIG)
+ return -EOPNOTSUPP;
+
+ sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+
+ sccb->header.length = PAGE_SIZE;
+	sccb->atype = SCLP_RECONFIG_PCI_ATYPE;
+ sccb->aid = fid;
+ rc = do_sync_request(cmd, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
+ cmd, sccb->header.response_code);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long) sccb);
+ return rc;
+}
+
+int sclp_pci_configure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+int sclp_pci_deconfigure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+/*
* Channel path configuration related functions.
*/
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 731470e68493..84846c2b96d3 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -65,10 +65,18 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
}
}
-static int ccwgroup_set_online(struct ccwgroup_device *gdev)
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- int ret = 0;
+ int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@@ -84,11 +92,20 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_online);
-static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- int ret = 0;
+ int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@@ -104,6 +121,7 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_offline);
static ssize_t ccwgroup_online_store(struct device *dev,
struct device_attribute *attr,
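
A minimal sketch of what the newly exported helpers allow a ccwgroup driver to do, e.g. bouncing its own device from an error handler (illustrative only; the function name is a placeholder):

#include <asm/ccwgroup.h>

static int example_ccwgroup_restart(struct ccwgroup_device *gdev)
{
	int rc;

	rc = ccwgroup_set_offline(gdev);
	if (rc && rc != -EAGAIN)
		return rc;
	return ccwgroup_set_online(gdev);
}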
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4d51a7c4eb8b..68e80e2734a4 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
/*
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999, 2010
+ * Copyright IBM Corp. 1999,2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
@@ -260,26 +261,45 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
-struct chsc_sei_area {
- struct chsc_header request;
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 vf; /* validity flags */
- u8 rs; /* reporting source */
- u8 cc; /* content code */
- u16 fla; /* full link address */
- u16 rsid; /* reporting source id */
- u32 reserved5;
- u32 reserved6;
- u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
-} __attribute__ ((packed));
-
-static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 0ULL
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 reserved2;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@@ -298,7 +318,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@@ -330,7 +350,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
-static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
struct channel_path *chp;
struct chp_id chpid;
@@ -366,7 +386,7 @@ struct chp_config_data {
u8 pc;
};
-static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
@@ -398,7 +418,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
int ret;
@@ -412,13 +432,26 @@ static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
" failed (rc=%d).\n", ret);
}
-static void chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
- /* Check if we might have lost some information. */
- if (sei_area->flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
+#ifdef CONFIG_PCI
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+ sei_area->cc);
+ break;
}
+#endif
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident*/
@@ -443,9 +476,51 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
}
}
+static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+{
+ do {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code == 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei successful\n");
+
+ /* Check if we might have lost some information. */
+ if (sei->u.nt0_area.flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+
+ switch (sei->ntsm) {
+ case CHSC_SEI_NT0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ return 1;
+ case CHSC_SEI_NT2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ return 1;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
+ sei->ntsm);
+ return 0;
+ }
+ } else {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
+ sei->response.code);
+ break;
+ }
+ } while (sei->u.nt0_area.flags & 0x80);
+
+ return 0;
+}
+
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei_area *sei_area;
+ struct chsc_sei *sei;
if (overflow) {
css_schedule_eval_all();
@@ -459,25 +534,18 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
- sei_area = sei_page;
+ sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- do {
- memset(sei_area, 0, sizeof(*sei_area));
- sei_area->request.length = 0x0010;
- sei_area->request.code = 0x000e;
- if (chsc(sei_area))
- break;
- if (sei_area->response.code == 0x0001) {
- CIO_CRW_EVENT(4, "chsc: sei successful\n");
- chsc_process_sei(sei_area);
- } else {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei_area->response.code);
- break;
- }
- } while (sei_area->flags & 0x80);
+ /*
+ * The ntsm does not allow us to select NT0 and NT2 together. We need
+ * to check for NT2 first, then additionally for NT0...
+ * The ntsm does not allow us to select NT0 and NT2 together. We need
+ */
+#ifdef CONFIG_PCI
+ if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
+#endif
+ __chsc_process_crw(sei, CHSC_SEI_NT0);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fd3143c291c6..6995cff44636 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2036,16 +2036,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-/* Helper func for qdio. */
-struct subchannel_id
-ccw_device_get_subchannel_id(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- return sch->schid;
-}
-
static void ccw_device_todo(struct work_struct *work)
{
struct ccw_device_private *priv;
@@ -2138,4 +2128,3 @@ EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
-EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 6bace6942396..2e575cff9845 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -142,9 +142,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
-/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
-extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ec7fb6d3b479..c77b6e06bf64 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -755,14 +755,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
-// FIXME: these have to go:
-
-int
-_ccw_device_get_subchannel_number(struct ccw_device *cdev)
+/**
+ * ccw_device_get_schid - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
- return cdev->private->schid.sch_no;
-}
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -777,5 +781,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
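
The new ccw_device_get_schid() hands back the full subchannel id by reference; the qdio_setup.c hunk further below shows the in-tree conversion. For callers that previously only needed the subchannel number, usage would look roughly like this (sketch only, the logging helper is hypothetical):

/* Sketch only: obtaining the subchannel id via the new interface. */
static void example_log_schid(struct ccw_device *cdev)
{
	struct subchannel_id schid;

	ccw_device_get_schid(cdev, &schid);
	pr_info("subchannel 0.%x.%04x\n", schid.ssid, schid.sch_no);
}
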
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 368368fe04b2..908d287f66c1 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
- int *mismatch, int *reserved, u8 *reset)
+ int *mismatch, u8 *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->pgid[0];
struct pgid *first = NULL;
@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
- *reserved = 1;
+ *reserved |= lpm;
if (pgid_is_reset(pgid)) {
*reset |= lpm;
continue;
@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int mismatch = 0;
- int reserved = 0;
+ u8 reserved = 0;
u8 reset = 0;
u8 donepm;
if (rc)
goto out;
pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
- if (reserved)
+ if (reserved == cdev->private->pgid_valid_mask)
rc = -EUSERS;
else if (mismatch)
rc = -EOPNOTSUPP;
@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e06fa03ea1e4..1671d3461f29 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -129,7 +129,6 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, eqbs);
if (!q->is_input_q)
@@ -147,7 +146,6 @@ again:
}
if (rc == 2) {
- BUG_ON(tmp_count == count);
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
@@ -189,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
-
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, sqbs);
if (!q->is_input_q)
@@ -199,7 +195,7 @@ again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (!rc) {
- WARN_ON(tmp_count);
+ WARN_ON_ONCE(tmp_count);
return count - tmp_count;
}
@@ -224,9 +220,6 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char __state = 0;
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
@@ -258,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@@ -345,7 +335,6 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
- WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
retries++;
if (!start_time) {
@@ -559,7 +548,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
@@ -678,12 +667,10 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
if (aob == NULL)
continue;
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
@@ -703,12 +690,11 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- BUG_ON(q->sbal_state == NULL);
q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
- BUG_ON(phys_aob & 0xFF);
+ WARN_ON_ONCE(phys_aob & 0xFF);
}
out:
@@ -809,8 +795,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
goto out;
switch (state) {
- case SLSB_P_OUTPUT_PENDING:
- BUG();
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
@@ -840,7 +824,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_OUTPUT_HALTED:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
@@ -912,7 +896,7 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_outbound);
- BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@@ -1138,16 +1122,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
- switch (PTR_ERR(irb)) {
- case -EIO:
- DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- default:
- WARN_ON(1);
- return;
- }
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
@@ -1173,7 +1151,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_STOPPED:
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@@ -1227,7 +1205,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- BUG_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1358,7 +1336,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
- WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@@ -1597,9 +1574,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-
used = atomic_add_return(count, &q->nr_buf_used) - count;
- BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
if (need_siga_in(q))
return qdio_siga_input(q);
@@ -1624,7 +1599,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
- BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
@@ -1678,7 +1652,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
{
struct qdio_irq *irq_ptr;
-
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
@@ -1721,8 +1694,6 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
-
clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@@ -1769,7 +1740,6 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
/*
* Cannot rely on automatic sync after interrupt since queues may
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6c973db14983..16ecd35b8e51 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
- BUG_ON((unsigned long)q->sbal[j] & 0xff);
- }
/* fill in slib */
if (i > 0) {
@@ -434,9 +432,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
-
- irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
@@ -483,7 +480,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
- "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2e060088fa87..bdb394b066fc 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -73,7 +73,6 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
mutex_lock(&tiq_list_lock);
- BUG_ON(irq_ptr->nr_input_qs < 1);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1 << 7);
@@ -83,7 +82,6 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
- BUG_ON(irq_ptr->nr_input_qs < 1);
q = irq_ptr->input_qs[0];
/* if establish triggered an error */
if (!q || !q->entry.prev || !q->entry.next)
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 035b6dc31b71..7c522f338bda 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -241,84 +241,70 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
- int mod_len, short_len, long_len, long_offset, limit;
+ int mod_len, short_len;
unsigned char *p, *q, *dp, *dq, *u, *inp;
mod_len = crt->inputdatalength;
short_len = mod_len / 2;
- long_len = mod_len / 2 + 8;
/*
- * CEX2A cannot handle p, dp, or U > 128 bytes.
- * If we have one of these, we need to do extra checking.
- * For CEX3A the limit is 256 bytes.
+ * CEX2A and CEX3A w/o FW update can handle requests up to
+ * 256 byte modulus (2k keys).
+ * CEX3A with FW update and CEX4A cards are able to handle
+ * 512 byte modulus (4k keys).
*/
- if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
- limit = 256;
- else
- limit = 128;
-
- if (long_len > limit) {
- /*
- * zcrypt_rsa_crt already checked for the leading
- * zeroes of np_prime, bp_key and u_mult_inc.
- */
- long_offset = long_len - limit;
- long_len = limit;
- } else
- long_offset = 0;
-
- /*
- * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
- * the larger message structure.
- */
- if (long_len <= 64) {
+ if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->message;
memset(crb1, 0, sizeof(*crb1));
ap_msg->length = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
- p = crb1->p + sizeof(crb1->p) - long_len;
+ p = crb1->p + sizeof(crb1->p) - short_len;
q = crb1->q + sizeof(crb1->q) - short_len;
- dp = crb1->dp + sizeof(crb1->dp) - long_len;
+ dp = crb1->dp + sizeof(crb1->dp) - short_len;
dq = crb1->dq + sizeof(crb1->dq) - short_len;
- u = crb1->u + sizeof(crb1->u) - long_len;
+ u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
- } else if (long_len <= 128) {
+ } else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->message;
memset(crb2, 0, sizeof(*crb2));
ap_msg->length = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
- p = crb2->p + sizeof(crb2->p) - long_len;
+ p = crb2->p + sizeof(crb2->p) - short_len;
q = crb2->q + sizeof(crb2->q) - short_len;
- dp = crb2->dp + sizeof(crb2->dp) - long_len;
+ dp = crb2->dp + sizeof(crb2->dp) - short_len;
dq = crb2->dq + sizeof(crb2->dq) - short_len;
- u = crb2->u + sizeof(crb2->u) - long_len;
+ u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
- } else {
- /* long_len >= 256 */
+ } else if ((mod_len <= 512) && /* up to 4096 bit key size */
+ (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
- p = crb3->p + sizeof(crb3->p) - long_len;
+ p = crb3->p + sizeof(crb3->p) - short_len;
q = crb3->q + sizeof(crb3->q) - short_len;
- dp = crb3->dp + sizeof(crb3->dp) - long_len;
+ dp = crb3->dp + sizeof(crb3->dp) - short_len;
dq = crb3->dq + sizeof(crb3->dq) - short_len;
- u = crb3->u + sizeof(crb3->u) - long_len;
+ u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
- }
+ } else
+ return -EINVAL;
- if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
+ /*
+ * correct the offset of p, bp and mult_inv according to zcrypt.h;
+ * block size right aligned (skip the first byte)
+ */
+ if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(q, crt->nq_prime, short_len) ||
- copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
+ copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(dq, crt->bq_key, short_len) ||
- copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
+ copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index e56dc72c7733..0a66e4aeeb50 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -33,6 +33,8 @@
#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
+#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
+
int zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 448303bdb85f..9e0ebe051243 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -83,19 +83,25 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
+#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
+#endif
+
#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
-#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
+#endif /* CONFIG_64BIT */
+
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *) 0)
#endif
@@ -286,15 +292,20 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
+#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
+#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
@@ -304,6 +315,7 @@ static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
+#endif
/*
* Change "struct page" to physical address.
@@ -363,9 +375,16 @@ static inline void *bus_to_virt(unsigned long address)
}
#endif
+#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#endif
+
+#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#endif
+#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+#endif
#endif /* __KERNEL__ */
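
The #ifndef guards added above follow the usual asm-generic override pattern: an architecture provides its own accessor and defines a macro of the same name before including the generic header, so the generic fallback compiles out. Roughly, as a sketch of the pattern (not the actual s390 header):

/* arch/<arch>/include/asm/io.h (sketch) */
static inline void memcpy_fromio(void *dst,
				 const volatile void __iomem *src, size_t n)
{
	/* architecture specific copy from MMIO space */
}
#define memcpy_fromio memcpy_fromio	/* suppress the asm-generic fallback */

#include <asm-generic/io.h>
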
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 526f10a637c1..fdf2c4a238cc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -10,9 +10,6 @@
*/
#include <linux/smp.h>
-
-#ifndef CONFIG_S390
-
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
@@ -746,8 +743,11 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
-#endif /* CONFIG_GENERIC_HARDIRQS */
+#else /* !CONFIG_GENERIC_HARDIRQS */
-#endif /* !CONFIG_S390 */
+extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* _LINUX_IRQ_H */