author     Linus Torvalds <torvalds@linux-foundation.org>    2014-08-06 09:41:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-08-06 09:41:23 -0700
commit     049711bf3cc59501ebeae621aa22acd3918ebd79 (patch)
tree       d6168b7bb3d8cee71a4a4e43a85cfaf2a3ef1a0a /arch
parent     ae045e2455429c418a418a3376301a9e5753a0a8 (diff)
parent     5b6ff9df056b69a3b65708bfb9923af41146c8c8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:

 1) Add sparc RAM output to /proc/iomem, from Bob Picco.

 2) Allow seeks on /dev/mdesc, from Khalid Aziz.

 3) Cleanup sparc64 I/O accessors, from Sam Ravnborg.

 4) If update_mmu_cache{,_pmd}() is called with a non-valid mapping, do
    not insert it into the TLB miss hash tables, otherwise we'll livelock.
    Based upon work by Christopher Alexander Tobias Schulze.

 5) Fix BREAK detection in the sunsab driver when no actual characters
    are pending, from Christopher Alexander Tobias Schulze.

 6) Because we have modules --> openfirmware --> vmalloc ordering of
    virtual memory, the lazy VMAP TLB flusher can cons up an invocation
    of flush_tlb_kernel_range() that covers the openfirmware address
    range.  Unfortunately this will flush out the firmware's locked TLB
    mapping, which causes all kinds of trouble.  Just split up the flush
    request if this happens, but in the long term the lazy VMAP flusher
    should probably be made a little bit smarter.
    Based upon work by Christopher Alexander Tobias Schulze.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next:
  sparc64: Fix up merge thinko.
  sparc: Add "install" target
  arch/sparc/math-emu/math_32.c: drop stray break operator
  sparc64: ldc_connect() should not return EINVAL when handshake is in progress.
  sparc64: Guard against flushing openfirmware mappings.
  sunsab: Fix detection of BREAK on sunsab serial console
  bbc-i2c: Fix BBC I2C envctrl on SunBlade 2000
  sparc64: Do not insert non-valid PTEs into the TSB hash table.
  sparc64: avoid code duplication in io_64.h
  sparc64: reorder functions in io_64.h
  sparc64: drop unused SLOW_DOWN_IO definitions
  sparc64: remove macro indirection in io_64.h
  sparc64: update IO access functions in PeeCeeI
  sparcspkr: use sbus_*() primitives for IO
  sparc: Add support for seek and shorter read to /dev/mdesc
  sparc: use %s for unaligned panic
  drivers/sbus/char: Micro-optimization in display7seg.c
  display7seg: Introduce the use of the managed version of kzalloc
  sparc64 - add mem to iomem resource
Diffstat (limited to 'arch')
-rw-r--r--   arch/sparc/Makefile                    |   3
-rw-r--r--   arch/sparc/boot/Makefile               |   4
-rw-r--r--   arch/sparc/boot/install.sh             |  50
-rw-r--r--   arch/sparc/include/asm/io_64.h         | 381
-rw-r--r--   arch/sparc/include/asm/tlbflush_64.h   |  12
-rw-r--r--   arch/sparc/kernel/ldc.c                |   2
-rw-r--r--   arch/sparc/kernel/mdesc.c              |  82
-rw-r--r--   arch/sparc/kernel/unaligned_32.c       |   2
-rw-r--r--   arch/sparc/lib/PeeCeeI.c               |  36
-rw-r--r--   arch/sparc/math-emu/math_32.c          |   2
-rw-r--r--   arch/sparc/mm/init_64.c                |  96
11 files changed, 390 insertions, 280 deletions
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 9ff423678cbc..eaee14637d93 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -68,6 +68,9 @@ all: zImage
image zImage uImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 6e63afb128d9..6a4ceae5ec67 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -69,3 +69,7 @@ $(obj)/image: vmlinux FORCE
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
+
+install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \
+ System.map "$(INSTALL_PATH)"
diff --git a/arch/sparc/boot/install.sh b/arch/sparc/boot/install.sh
new file mode 100644
index 000000000000..b32851eae693
--- /dev/null
+++ b/arch/sparc/boot/install.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for SPARC architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+# Default install - same as make zlilo
+
+if [ -f $4/vmlinuz ]; then
+ mv $4/vmlinuz $4/vmlinuz.old
+fi
+
+if [ -f $4/System.map ]; then
+ mv $4/System.map $4/System.old
+fi
+
+cat $2 > $4/vmlinuz
+cp $3 $4/System.map
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 05381c3a4228..80b54b326d49 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -9,125 +9,99 @@
#include <asm/asi.h>
#include <asm-generic/pci_iomap.h>
-/* PC crapola... */
-#define __SLOW_DOWN_IO do { } while (0)
-#define SLOW_DOWN_IO do { } while (0)
-
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
-static inline u8 _inb(unsigned long addr)
+/* __raw_{read,write}{b,w,l,q} uses direct access.
+ * Access the memory as big endian bypassing the cache
+ * by using ASI_PHYS_BYPASS_EC_E
+ */
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 ret;
- __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
+ __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
-static inline u16 _inw(unsigned long addr)
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 ret;
- __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
+ __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
-static inline u32 _inl(unsigned long addr)
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 ret;
- __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
+ __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
: "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
-static inline void _outb(u8 b, unsigned long addr)
-{
- __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
- : /* no outputs */
- : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
-}
-
-static inline void _outw(u16 w, unsigned long addr)
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
{
- __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
- : /* no outputs */
- : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
-}
-
-static inline void _outl(u32 l, unsigned long addr)
-{
- __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
- : /* no outputs */
- : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
-}
-
-#define inb(__addr) (_inb((unsigned long)(__addr)))
-#define inw(__addr) (_inw((unsigned long)(__addr)))
-#define inl(__addr) (_inl((unsigned long)(__addr)))
-#define outb(__b, __addr) (_outb((u8)(__b), (unsigned long)(__addr)))
-#define outw(__w, __addr) (_outw((u16)(__w), (unsigned long)(__addr)))
-#define outl(__l, __addr) (_outl((u32)(__l), (unsigned long)(__addr)))
-
-#define inb_p(__addr) inb(__addr)
-#define outb_p(__b, __addr) outb(__b, __addr)
-#define inw_p(__addr) inw(__addr)
-#define outw_p(__w, __addr) outw(__w, __addr)
-#define inl_p(__addr) inl(__addr)
-#define outl_p(__l, __addr) outl(__l, __addr)
+ u64 ret;
-void outsb(unsigned long, const void *, unsigned long);
-void outsw(unsigned long, const void *, unsigned long);
-void outsl(unsigned long, const void *, unsigned long);
-void insb(unsigned long, void *, unsigned long);
-void insw(unsigned long, void *, unsigned long);
-void insl(unsigned long, void *, unsigned long);
+ __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
+ : "=r" (ret)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
-{
- insb((unsigned long __force)port, buf, count);
-}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
-{
- insw((unsigned long __force)port, buf, count);
+ return ret;
}
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
- insl((unsigned long __force)port, buf, count);
+ __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
+ : /* no outputs */
+ : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
{
- outsb((unsigned long __force)port, buf, count);
+ __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
+ : /* no outputs */
+ : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
{
- outsw((unsigned long __force)port, buf, count);
+ __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
+ : /* no outputs */
+ : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
{
- outsl((unsigned long __force)port, buf, count);
+ __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
+ : /* no outputs */
+ : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
-/* Memory functions, same as I/O accesses on Ultra. */
-static inline u8 _readb(const volatile void __iomem *addr)
+/* Memory functions, same as I/O accesses on Ultra.
+ * Access memory as little endian bypassing
+ * the cache by using ASI_PHYS_BYPASS_EC_E_L
+ */
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
{ u8 ret;
__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
@@ -137,7 +111,8 @@ static inline u8 _readb(const volatile void __iomem *addr)
return ret;
}
-static inline u16 _readw(const volatile void __iomem *addr)
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
{ u16 ret;
__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
@@ -148,7 +123,8 @@ static inline u16 _readw(const volatile void __iomem *addr)
return ret;
}
-static inline u32 _readl(const volatile void __iomem *addr)
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
{ u32 ret;
__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
@@ -159,7 +135,8 @@ static inline u32 _readl(const volatile void __iomem *addr)
return ret;
}
-static inline u64 _readq(const volatile void __iomem *addr)
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
{ u64 ret;
__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
@@ -170,7 +147,8 @@ static inline u64 _readq(const volatile void __iomem *addr)
return ret;
}
-static inline void _writeb(u8 b, volatile void __iomem *addr)
+#define writeb writeb
+static inline void writeb(u8 b, volatile void __iomem *addr)
{
__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
: /* no outputs */
@@ -178,7 +156,8 @@ static inline void _writeb(u8 b, volatile void __iomem *addr)
: "memory");
}
-static inline void _writew(u16 w, volatile void __iomem *addr)
+#define writew writew
+static inline void writew(u16 w, volatile void __iomem *addr)
{
__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
: /* no outputs */
@@ -186,7 +165,8 @@ static inline void _writew(u16 w, volatile void __iomem *addr)
: "memory");
}
-static inline void _writel(u32 l, volatile void __iomem *addr)
+#define writel writel
+static inline void writel(u32 l, volatile void __iomem *addr)
{
__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
: /* no outputs */
@@ -194,7 +174,8 @@ static inline void _writel(u32 l, volatile void __iomem *addr)
: "memory");
}
-static inline void _writeq(u64 q, volatile void __iomem *addr)
+#define writeq writeq
+static inline void writeq(u64 q, volatile void __iomem *addr)
{
__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
: /* no outputs */
@@ -202,100 +183,91 @@ static inline void _writeq(u64 q, volatile void __iomem *addr)
: "memory");
}
-#define readb(__addr) _readb(__addr)
-#define readw(__addr) _readw(__addr)
-#define readl(__addr) _readl(__addr)
-#define readq(__addr) _readq(__addr)
-#define readb_relaxed(__addr) _readb(__addr)
-#define readw_relaxed(__addr) _readw(__addr)
-#define readl_relaxed(__addr) _readl(__addr)
-#define readq_relaxed(__addr) _readq(__addr)
-#define writeb(__b, __addr) _writeb(__b, __addr)
-#define writew(__w, __addr) _writew(__w, __addr)
-#define writel(__l, __addr) _writel(__l, __addr)
-#define writeq(__q, __addr) _writeq(__q, __addr)
-/* Now versions without byte-swapping. */
-static inline u8 _raw_readb(unsigned long addr)
+#define inb inb
+static inline u8 inb(unsigned long addr)
{
- u8 ret;
-
- __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
-
- return ret;
+ return readb((volatile void __iomem *)addr);
}
-static inline u16 _raw_readw(unsigned long addr)
+#define inw inw
+static inline u16 inw(unsigned long addr)
{
- u16 ret;
-
- __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
-
- return ret;
+ return readw((volatile void __iomem *)addr);
}
-static inline u32 _raw_readl(unsigned long addr)
+#define inl inl
+static inline u32 inl(unsigned long addr)
{
- u32 ret;
+ return readl((volatile void __iomem *)addr);
+}
- __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+#define outb outb
+static inline void outb(u8 b, unsigned long addr)
+{
+ writeb(b, (volatile void __iomem *)addr);
+}
- return ret;
+#define outw outw
+static inline void outw(u16 w, unsigned long addr)
+{
+ writew(w, (volatile void __iomem *)addr);
}
-static inline u64 _raw_readq(unsigned long addr)
+#define outl outl
+static inline void outl(u32 l, unsigned long addr)
{
- u64 ret;
+ writel(l, (volatile void __iomem *)addr);
+}
- __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
- return ret;
+#define inb_p(__addr) inb(__addr)
+#define outb_p(__b, __addr) outb(__b, __addr)
+#define inw_p(__addr) inw(__addr)
+#define outw_p(__w, __addr) outw(__w, __addr)
+#define inl_p(__addr) inl(__addr)
+#define outl_p(__l, __addr) outl(__l, __addr)
+
+void outsb(unsigned long, const void *, unsigned long);
+void outsw(unsigned long, const void *, unsigned long);
+void outsl(unsigned long, const void *, unsigned long);
+void insb(unsigned long, void *, unsigned long);
+void insw(unsigned long, void *, unsigned long);
+void insl(unsigned long, void *, unsigned long);
+
+static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
+{
+ insb((unsigned long __force)port, buf, count);
+}
+static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
+{
+ insw((unsigned long __force)port, buf, count);
}
-static inline void _raw_writeb(u8 b, unsigned long addr)
+static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
{
- __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
- : /* no outputs */
- : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ insl((unsigned long __force)port, buf, count);
}
-static inline void _raw_writew(u16 w, unsigned long addr)
+static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
{
- __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
- : /* no outputs */
- : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ outsb((unsigned long __force)port, buf, count);
}
-static inline void _raw_writel(u32 l, unsigned long addr)
+static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
{
- __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
- : /* no outputs */
- : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ outsw((unsigned long __force)port, buf, count);
}
-static inline void _raw_writeq(u64 q, unsigned long addr)
+static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
{
- __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
- : /* no outputs */
- : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ outsl((unsigned long __force)port, buf, count);
}
-#define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr)))
-#define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr)))
-#define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr)))
-#define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr)))
-#define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
-#define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr)))
-#define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr)))
-#define __raw_writeq(__q, __addr) (_raw_writeq((u64)(__q), (unsigned long)(__addr)))
+#define readb_relaxed(__addr) readb(__addr)
+#define readw_relaxed(__addr) readw(__addr)
+#define readl_relaxed(__addr) readl(__addr)
+#define readq_relaxed(__addr) readq(__addr)
/* Valid I/O Space regions are anywhere, because each PCI bus supported
* can live in an arbitrary area of the physical address range.
@@ -305,96 +277,47 @@ static inline void _raw_writeq(u64 q, unsigned long addr)
/* Now, SBUS variants, only difference from PCI is that we do
* not use little-endian ASIs.
*/
-static inline u8 _sbus_readb(const volatile void __iomem *addr)
+static inline u8 sbus_readb(const volatile void __iomem *addr)
{
- u8 ret;
-
- __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
-
- return ret;
+ return __raw_readb(addr);
}
-static inline u16 _sbus_readw(const volatile void __iomem *addr)
+static inline u16 sbus_readw(const volatile void __iomem *addr)
{
- u16 ret;
-
- __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
-
- return ret;
+ return __raw_readw(addr);
}
-static inline u32 _sbus_readl(const volatile void __iomem *addr)
+static inline u32 sbus_readl(const volatile void __iomem *addr)
{
- u32 ret;
-
- __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
-
- return ret;
+ return __raw_readl(addr);
}
-static inline u64 _sbus_readq(const volatile void __iomem *addr)
+static inline u64 sbus_readq(const volatile void __iomem *addr)
{
- u64 ret;
-
- __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
- : "=r" (ret)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
-
- return ret;
+ return __raw_readq(addr);
}
-static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
+static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
- __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
- : /* no outputs */
- : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
+ __raw_writeb(b, addr);
}
-static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
+static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
- __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
- : /* no outputs */
- : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
+ __raw_writew(w, addr);
}
-static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
+static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
- __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
- : /* no outputs */
- : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
+ __raw_writel(l, addr);
}
-static inline void _sbus_writeq(u64 l, volatile void __iomem *addr)
+static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
{
- __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
- : /* no outputs */
- : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
- : "memory");
+ __raw_writeq(q, addr);
}
-#define sbus_readb(__addr) _sbus_readb(__addr)
-#define sbus_readw(__addr) _sbus_readw(__addr)
-#define sbus_readl(__addr) _sbus_readl(__addr)
-#define sbus_readq(__addr) _sbus_readq(__addr)
-#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
-#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
-#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
-#define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr)
-
-static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
+static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
while(n--) {
sbus_writeb(c, dst);
@@ -402,10 +325,7 @@ static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_s
}
}
-#define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz)
-
-static inline void
-_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
+static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
volatile void __iomem *d = dst;
@@ -415,11 +335,8 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
}
}
-#define memset_io(d,c,sz) _memset_io(d,c,sz)
-
-static inline void
-_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
- __kernel_size_t n)
+static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
{
char *d = dst;
@@ -430,10 +347,9 @@ _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
}
}
-#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
-static inline void
-_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
{
char *d = dst;
@@ -444,11 +360,8 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
}
}
-#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
-
-static inline void
-_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
- __kernel_size_t n)
+static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
@@ -460,10 +373,8 @@ _sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
}
}
-#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
-
-static inline void
-_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
@@ -475,8 +386,6 @@ _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
}
}
-#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
-
#define mmiowb()
#ifdef __KERNEL__
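
For reference, the reworked sbus_*() helpers above are what SBUS drivers keep calling after this cleanup; they are now thin inline wrappers around the big-endian __raw_*() accessors. A minimal illustrative sketch, not part of this patch — the device, the 0x04 register offset, and the way "regs" was mapped are hypothetical:

	#include <linux/types.h>
	#include <asm/io.h>		/* sbus_readl(), sbus_writel() */

	/* Toggle an enable bit in a made-up SBUS device control register.
	 * "regs" is assumed to be an __iomem cookie the driver obtained at
	 * probe time (e.g. via of_ioremap()).
	 */
	static void example_toggle_enable(void __iomem *regs)
	{
		u32 ctrl = sbus_readl(regs + 0x04);	/* big-endian, cache-bypassing read */

		ctrl ^= 0x1;
		sbus_writel(ctrl, regs + 0x04);		/* value first, address second */
	}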
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 816d8202fa0a..dea1cfa2122b 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
{
}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
void flush_tlb_pending(void);
@@ -48,11 +50,6 @@ void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
-#define flush_tlb_kernel_range(start,end) \
-do { flush_tsb_kernel_range(start,end); \
- __flush_tlb_kernel_range(start,end); \
-} while (0)
-
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
-#define flush_tlb_kernel_range(start, end) \
-do { flush_tsb_kernel_range(start,end); \
- smp_flush_tlb_kernel_range(start, end); \
-} while (0)
-
#define global_flush_tlb_page(mm, vaddr) \
smp_flush_tlb_page(mm, vaddr)
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index e01d75d40329..66dacd56bb10 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
- err = -EINVAL;
+ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
else
err = start_handshake(lp);
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index a1a4400d4025..99632a87e697 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -906,29 +906,85 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
smp_fill_in_sib_core_maps();
}
-static ssize_t mdesc_read(struct file *file, char __user *buf,
- size_t len, loff_t *offp)
+/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
+ * opened. Hold this reference until /dev/mdesc is closed to ensure
+ * mdesc data structure is not released underneath us. Store the
+ * pointer to mdesc structure in private_data for read and seek to use
+ */
+static int mdesc_open(struct inode *inode, struct file *file)
{
struct mdesc_handle *hp = mdesc_grab();
- int err;
if (!hp)
return -ENODEV;
- err = hp->handle_size;
- if (len < hp->handle_size)
- err = -EMSGSIZE;
- else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
- err = -EFAULT;
- mdesc_release(hp);
+ file->private_data = hp;
+
+ return 0;
+}
+
+static ssize_t mdesc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offp)
+{
+ struct mdesc_handle *hp = file->private_data;
+ unsigned char *mdesc;
+ int bytes_left, count = len;
+
+ if (*offp >= hp->handle_size)
+ return 0;
+
+ bytes_left = hp->handle_size - *offp;
+ if (count > bytes_left)
+ count = bytes_left;
+
+ mdesc = (unsigned char *)&hp->mdesc;
+ mdesc += *offp;
+ if (!copy_to_user(buf, mdesc, count)) {
+ *offp += count;
+ return count;
+ } else {
+ return -EFAULT;
+ }
+}
- return err;
+static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct mdesc_handle *hp;
+
+ switch (whence) {
+ case SEEK_CUR:
+ offset += file->f_pos;
+ break;
+ case SEEK_SET:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hp = file->private_data;
+ if (offset > hp->handle_size)
+ return -EINVAL;
+ else
+ file->f_pos = offset;
+
+ return offset;
+}
+
+/* mdesc_close() - /dev/mdesc is being closed, release the reference to
+ * mdesc structure.
+ */
+static int mdesc_close(struct inode *inode, struct file *file)
+{
+ mdesc_release(file->private_data);
+ return 0;
}
static const struct file_operations mdesc_fops = {
- .read = mdesc_read,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
+ .open = mdesc_open,
+ .read = mdesc_read,
+ .llseek = mdesc_llseek,
+ .release = mdesc_close,
+ .owner = THIS_MODULE,
};
static struct miscdevice mdesc_misc = {
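
The open/llseek/read rework above is what makes /dev/mdesc seekable and tolerant of short reads. A minimal userspace sketch of the behaviour it enables (illustrative only; error handling is trimmed and the 16-byte probe is an arbitrary choice):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16];
		ssize_t n;
		int fd = open("/dev/mdesc", O_RDONLY);

		if (fd < 0)
			return 1;

		/* SEEK_SET and SEEK_CUR are now honoured, and a read shorter
		 * than the whole machine description no longer fails with
		 * -EMSGSIZE; it simply returns the requested bytes.
		 */
		lseek(fd, 16, SEEK_SET);
		n = read(fd, buf, sizeof(buf));
		printf("read %zd bytes at offset 16\n", n);

		close(fd);
		return 0;
	}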
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index c5c61b3c6b56..32b61d1b6379 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -166,7 +166,7 @@ unsigned long safe_compute_effective_address(struct pt_regs *regs,
/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
- panic(str);
+ panic("%s", str);
}
/* una_asm.S */
diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c
index 6529f8657597..e6d183675990 100644
--- a/arch/sparc/lib/PeeCeeI.c
+++ b/arch/sparc/lib/PeeCeeI.c
@@ -15,7 +15,7 @@ void outsb(unsigned long __addr, const void *src, unsigned long count)
const u8 *p = src;
while (count--)
- outb(*p++, addr);
+ __raw_writeb(*p++, addr);
}
EXPORT_SYMBOL(outsb);
@@ -93,21 +93,21 @@ void insb(unsigned long __addr, void *dst, unsigned long count)
u8 *pb = dst;
while ((((unsigned long)pb) & 0x3) && count--)
- *pb++ = inb(addr);
+ *pb++ = __raw_readb(addr);
pi = (u32 *)pb;
while (count >= 4) {
u32 w;
- w = (inb(addr) << 24);
- w |= (inb(addr) << 16);
- w |= (inb(addr) << 8);
- w |= (inb(addr) << 0);
+ w = (__raw_readb(addr) << 24);
+ w |= (__raw_readb(addr) << 16);
+ w |= (__raw_readb(addr) << 8);
+ w |= (__raw_readb(addr) << 0);
*pi++ = w;
count -= 4;
}
pb = (u8 *)pi;
while (count--)
- *pb++ = inb(addr);
+ *pb++ = __raw_readb(addr);
}
}
EXPORT_SYMBOL(insb);
@@ -121,21 +121,21 @@ void insw(unsigned long __addr, void *dst, unsigned long count)
u32 *pi;
if (((unsigned long)ps) & 0x2) {
- *ps++ = le16_to_cpu(inw(addr));
+ *ps++ = __raw_readw(addr);
count--;
}
pi = (u32 *)ps;
while (count >= 2) {
u32 w;
- w = (le16_to_cpu(inw(addr)) << 16);
- w |= (le16_to_cpu(inw(addr)) << 0);
+ w = __raw_readw(addr) << 16;
+ w |= __raw_readw(addr) << 0;
*pi++ = w;
count -= 2;
}
ps = (u16 *)pi;
if (count)
- *ps = le16_to_cpu(inw(addr));
+ *ps = __raw_readw(addr);
}
}
EXPORT_SYMBOL(insw);
@@ -148,7 +148,7 @@ void insl(unsigned long __addr, void *dst, unsigned long count)
if ((((unsigned long)dst) & 0x3) == 0) {
u32 *pi = dst;
while (count--)
- *pi++ = le32_to_cpu(inl(addr));
+ *pi++ = __raw_readl(addr);
} else {
u32 l = 0, l2, *pi;
u16 *ps;
@@ -158,11 +158,11 @@ void insl(unsigned long __addr, void *dst, unsigned long count)
case 0x2:
ps = dst;
count -= 1;
- l = le32_to_cpu(inl(addr));
+ l = __raw_readl(addr);
*ps++ = l;
pi = (u32 *)ps;
while (count--) {
- l2 = le32_to_cpu(inl(addr));
+ l2 = __raw_readl(addr);
*pi++ = (l << 16) | (l2 >> 16);
l = l2;
}
@@ -173,13 +173,13 @@ void insl(unsigned long __addr, void *dst, unsigned long count)
case 0x1:
pb = dst;
count -= 1;
- l = le32_to_cpu(inl(addr));
+ l = __raw_readl(addr);
*pb++ = l >> 24;
ps = (u16 *)pb;
*ps++ = ((l >> 8) & 0xffff);
pi = (u32 *)ps;
while (count--) {
- l2 = le32_to_cpu(inl(addr));
+ l2 = __raw_readl(addr);
*pi++ = (l << 24) | (l2 >> 8);
l = l2;
}
@@ -190,11 +190,11 @@ void insl(unsigned long __addr, void *dst, unsigned long count)
case 0x3:
pb = (u8 *)dst;
count -= 1;
- l = le32_to_cpu(inl(addr));
+ l = __raw_readl(addr);
*pb++ = l >> 24;
pi = (u32 *)pb;
while (count--) {
- l2 = le32_to_cpu(inl(addr));
+ l2 = __raw_readl(addr);
*pi++ = (l << 8) | (l2 >> 24);
l = l2;
}
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index aa4d55b0bdf0..5ce8f2f64604 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
- fsr &= ~0xc00; fsr |= (IR << 10); break;
+ fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 16b58ff11e65..98ac8e80adae 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -22,6 +22,7 @@
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
+#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
@@ -351,6 +352,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
+ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
+ if (!pte_accessible(mm, pte))
+ return;
+
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -2619,6 +2624,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pte = pmd_val(entry);
+ /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
+ if (!(pte & _PAGE_VALID))
+ return;
+
/* We are fabricating 8MB pages using 4MB real hw pages. */
pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
@@ -2699,3 +2708,90 @@ void hugetlb_setup(struct pt_regs *regs)
}
}
#endif
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static inline resource_size_t compute_kern_paddr(void *addr)
+{
+ return (resource_size_t) (addr - KERNBASE + kern_base);
+}
+
+static void __init kernel_lds_init(void)
+{
+ code_resource.start = compute_kern_paddr(_text);
+ code_resource.end = compute_kern_paddr(_etext - 1);
+ data_resource.start = compute_kern_paddr(_etext);
+ data_resource.end = compute_kern_paddr(_edata - 1);
+ bss_resource.start = compute_kern_paddr(__bss_start);
+ bss_resource.end = compute_kern_paddr(_end - 1);
+}
+
+static int __init report_memory(void)
+{
+ int i;
+ struct resource *res;
+
+ kernel_lds_init();
+
+ for (i = 0; i < pavail_ents; i++) {
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+
+ if (!res) {
+ pr_warn("Failed to allocate source.\n");
+ break;
+ }
+
+ res->name = "System RAM";
+ res->start = pavail[i].phys_addr;
+ res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
+ res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+
+ if (insert_resource(&iomem_resource, res) < 0) {
+ pr_warn("Resource insertion failed.\n");
+ break;
+ }
+
+ insert_resource(res, &code_resource);
+ insert_resource(res, &data_resource);
+ insert_resource(res, &bss_resource);
+ }
+
+ return 0;
+}
+device_initcall(report_memory);
+
+#ifdef CONFIG_SMP
+#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
+#else
+#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
+ if (start < LOW_OBP_ADDRESS) {
+ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ }
+ if (end > HI_OBP_ADDRESS) {
+ flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
+ }
+ } else {
+ flush_tsb_kernel_range(start, end);
+ do_flush_tlb_kernel_range(start, end);
+ }
+}
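
With report_memory() registered as a device initcall, each pavail[] range is inserted into /proc/iomem as "System RAM", with the kernel's code, data, and bss resources nested inside it. Roughly like this — the addresses below are invented for illustration; only the resource names and the nesting come from the code above:

	00000000-3fffffff : System RAM
	  00404000-00a2bfff : Kernel code
	  00a2c000-00e85fff : Kernel data
	  00eda000-0107dfff : Kernel bss

The exact ranges depend on the machine and on where the kernel was loaded.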