Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig | 20
-rw-r--r--  arch/sparc/include/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/asm/asm-prototypes.h | 24
-rw-r--r--  arch/sparc/include/asm/cmpxchg_64.h | 76
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/sparc/include/asm/hugetlb.h | 6
-rw-r--r--  arch/sparc/include/asm/ldc.h | 8
-rw-r--r--  arch/sparc/include/asm/mdesc.h | 24
-rw-r--r--  arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 32
-rw-r--r--  arch/sparc/include/asm/nmi.h | 1
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h | 4
-rw-r--r--  arch/sparc/include/asm/pil.h | 1
-rw-r--r--  arch/sparc/include/asm/processor_32.h | 3
-rw-r--r--  arch/sparc/include/asm/processor_64.h | 2
-rw-r--r--  arch/sparc/include/asm/qrwlock.h | 7
-rw-r--r--  arch/sparc/include/asm/qspinlock.h | 7
-rw-r--r--  arch/sparc/include/asm/setup.h | 4
-rw-r--r--  arch/sparc/include/asm/siginfo.h | 13
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 208
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h | 12
-rw-r--r--  arch/sparc/include/asm/timer_64.h | 67
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h | 1
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h | 1
-rw-r--r--  arch/sparc/include/asm/vio.h | 12
-rw-r--r--  arch/sparc/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/sparc/include/uapi/asm/ioctls.h | 3
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h | 4
-rw-r--r--  arch/sparc/kernel/apc.c | 2
-rw-r--r--  arch/sparc/kernel/ds.c | 2
-rw-r--r--  arch/sparc/kernel/ftrace.c | 13
-rw-r--r--  arch/sparc/kernel/iommu.c | 52
-rw-r--r--  arch/sparc/kernel/iommu_common.h | 2
-rw-r--r--  arch/sparc/kernel/ioport.c | 27
-rw-r--r--  arch/sparc/kernel/irq_64.c | 17
-rw-r--r--  arch/sparc/kernel/jump_label.c | 2
-rw-r--r--  arch/sparc/kernel/kernel.h | 4
-rw-r--r--  arch/sparc/kernel/ldc.c | 151
-rw-r--r--  arch/sparc/kernel/mdesc.c | 331
-rw-r--r--  arch/sparc/kernel/nmi.c | 6
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 31
-rw-r--r--  arch/sparc/kernel/pmc.c | 2
-rw-r--r--  arch/sparc/kernel/process_32.c | 8
-rw-r--r--  arch/sparc/kernel/process_64.c | 19
-rw-r--r--  arch/sparc/kernel/prom_64.c | 2
-rw-r--r--  arch/sparc/kernel/setup_64.c | 7
-rw-r--r--  arch/sparc/kernel/smp_64.c | 31
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 4
-rw-r--r--  arch/sparc/kernel/time_32.c | 2
-rw-r--r--  arch/sparc/kernel/time_64.c | 179
-rw-r--r--  arch/sparc/kernel/tsb.S | 11
-rw-r--r--  arch/sparc/kernel/ttable_64.S | 2
-rw-r--r--  arch/sparc/kernel/vio.c | 210
-rw-r--r--  arch/sparc/kernel/viohs.c | 24
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/sparc/lib/Makefile | 1
-rw-r--r--  arch/sparc/lib/atomic_64.S | 44
-rw-r--r--  arch/sparc/lib/checksum_64.S | 1
-rw-r--r--  arch/sparc/lib/csum_copy.S | 1
-rw-r--r--  arch/sparc/lib/hweight.S | 16
-rw-r--r--  arch/sparc/lib/memscan_64.S | 2
-rw-r--r--  arch/sparc/lib/memset.S | 1
-rw-r--r--  arch/sparc/lib/multi3.S | 35
-rw-r--r--  arch/sparc/mm/extable.c | 28
-rw-r--r--  arch/sparc/mm/gup.c | 4
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 5
-rw-r--r--  arch/sparc/mm/init_32.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 89
-rw-r--r--  arch/sparc/mm/tsb.c | 7
-rw-r--r--  arch/sparc/mm/ultra.S | 5
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 12
71 files changed, 1220 insertions, 732 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..a4a626199c47 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -83,6 +83,8 @@ config SPARC64
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_NMI
select HAVE_REGS_AND_STACK_ACCESS_API
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
config ARCH_DEFCONFIG
string
@@ -92,6 +94,9 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
def_bool y
+config CPU_BIG_ENDIAN
+ def_bool y
+
config ARCH_ATU
bool
default y if SPARC64
@@ -192,9 +197,9 @@ config NR_CPUS
int "Maximum number of CPUs"
depends on SMP
range 2 32 if SPARC32
- range 2 1024 if SPARC64
+ range 2 4096 if SPARC64
default 32 if SPARC32
- default 64 if SPARC64
+ default 4096 if SPARC64
source kernel/Kconfig.hz
@@ -295,9 +300,13 @@ config NUMA
depends on SPARC64 && SMP
config NODES_SHIFT
- int
- default "4"
+ int "Maximum NUMA Nodes (as a power of 2)"
+ range 4 5 if SPARC64
+ default "5"
depends on NEED_MULTIPLE_NODES
+ help
+ Specify the maximum number of NUMA Nodes available on the target
+ system. Increases memory reserved to accommodate various tables.
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
@@ -573,9 +582,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
endmenu
source "net/Kconfig"
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e9e837bc3158..80ddc01f57ac 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -18,5 +18,4 @@ generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/asm-prototypes.h b/arch/sparc/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..d381e11c5dbb
--- /dev/null
+++ b/arch/sparc/include/asm/asm-prototypes.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <asm/xor.h>
+#include <asm/checksum.h>
+#include <asm/trap_block.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+#include <asm/oplib.h>
+#include <linux/atomic.h>
+
+void *__memscan_zero(void *, size_t);
+void *__memscan_generic(void *, int, size_t);
+void *__bzero(void *, size_t);
+void VISenter(void); /* Dummy prototype to suppress warning */
+#undef memcpy
+#undef memset
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+typedef int TItype __attribute__((mode(TI)));
+TItype __multi3(TItype a, TItype b);
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61058c2..4028f4f1e561 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -6,6 +6,17 @@
#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+ __asm__ __volatile__("cas [%2], %3, %0"
+ : "=&r" (new)
+ : "0" (new), "r" (m), "r" (old)
+ : "memory");
+
+ return new;
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
void __xchg_called_with_bad_pointer(void);
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+ unsigned long maddr = (unsigned long)m;
+ int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+ unsigned int mask = 0xffff << bit_shift;
+ unsigned int *ptr = (unsigned int *) (maddr & ~2);
+ unsigned int old32, new32, load32;
+
+ /* Read the old value */
+ load32 = *ptr;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & (~mask)) | val << bit_shift;
+ load32 = __cmpxchg_u32(ptr, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> bit_shift;
+}
+
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch (size) {
+ case 2:
+ return xchg16(ptr, x);
case 4:
return xchg32(ptr, x);
case 8:
@@ -65,10 +104,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
#include <asm-generic/cmpxchg-local.h>
+
static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
- __asm__ __volatile__("cas [%2], %3, %0"
+ __asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
@@ -76,15 +116,31 @@ __cmpxchg_u32(volatile int *m, int old, int new)
return new;
}
+/*
+ * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order
+ */
static inline unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
- __asm__ __volatile__("casx [%2], %3, %0"
- : "=&r" (new)
- : "0" (new), "r" (m), "r" (old)
- : "memory");
-
- return new;
+ unsigned long maddr = (unsigned long)m;
+ int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+ unsigned int mask = 0xff << bit_shift;
+ unsigned int *ptr = (unsigned int *) (maddr & ~3);
+ unsigned int old32, new32, load;
+ unsigned int load32 = *ptr;
+
+ do {
+ new32 = (load32 & ~mask) | (new << bit_shift);
+ old32 = (load32 & ~mask) | (old << bit_shift);
+ load32 = __cmpxchg_u32(ptr, old32, new32);
+ if (load32 == old32)
+ return old;
+ load = (load32 & mask) >> bit_shift;
+ } while (load == old);
+
+ return load;
}
/* This function doesn't exist, so you'll get a linker error
@@ -95,6 +151,8 @@ static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
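
The xchg16()/__cmpxchg_u8() helpers above emulate sub-word atomics on top of the 32-bit cas instruction by operating on the aligned 32-bit word that contains the target halfword or byte, masking off the neighbouring bytes, and retrying until the CAS succeeds. A minimal user-space sketch of the same mask-and-shift technique, using GCC's __atomic_compare_exchange_n builtin instead of the SPARC cas instruction (the function name is illustrative, not part of this patch):

#include <stdint.h>

/* Emulate a 16-bit atomic exchange on top of a 32-bit CAS (big-endian). */
static uint16_t xchg16_via_cas32(uint16_t *m, uint16_t val)
{
	uintptr_t addr = (uintptr_t)m;
	/* The halfword at offset 0 occupies the high 16 bits of its word. */
	int shift = (int)(((addr & 2) ^ 2) << 3);
	uint32_t mask = 0xffffu << shift;
	uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)2);
	uint32_t old = *word, new32;

	do {
		new32 = (old & ~mask) | ((uint32_t)val << shift);
		/* On failure, __atomic_compare_exchange_n reloads 'old'. */
	} while (!__atomic_compare_exchange_n(word, &old, new32, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	return (uint16_t)((old & mask) >> shift);
}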
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 69cc627779f2..60bf1633d554 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -5,11 +5,6 @@
#include <linux/mm.h>
#include <linux/dma-debug.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-#define HAVE_ARCH_DMA_SUPPORTED 1
-int dma_supported(struct device *dev, u64 mask);
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
@@ -19,7 +14,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops *leon_dma_ops;
extern const struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
@@ -28,7 +22,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return leon_dma_ops;
+ return &pci32_dma_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index dcbf985ab243..d1f837dc77a4 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
- if (len & ~HPAGE_MASK)
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
return -EINVAL;
- if (addr & ~HPAGE_MASK)
+ if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
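
prepare_hugepage_range() now validates addr and len against the mask of the hstate backing the file instead of the single global HPAGE_MASK, so files using a non-default huge page size are checked against the correct alignment. The test itself is a plain mask check; a small worked example with illustrative values (8 MB is the sparc64 default huge page size):

/* huge_page_mask(h) for an 8 MB page is ~(8UL * 1024 * 1024 - 1): a
 * request passes only if neither addr nor len has bits set below the
 * huge page boundary.
 */
static int range_ok_8mb(unsigned long addr, unsigned long len)
{
	unsigned long mask = ~(8UL * 1024 * 1024 - 1);

	return !(len & ~mask) && !(addr & ~mask);
}
/* range_ok_8mb(0x100800000UL, 0x1000000UL) == 1;
 * range_ok_8mb(0x100801000UL, 0x1000000UL) == 0 (start not aligned).
 */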
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 6e9004aa6f25..698738a10414 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -48,6 +48,8 @@ struct ldc_channel_config {
#define LDC_STATE_READY 0x03
#define LDC_STATE_CONNECTED 0x04
+#define LDC_PACKET_SIZE 64
+
struct ldc_channel;
/* Allocate state for a channel. */
@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
int ldc_disconnect(struct ldc_channel *lp);
int ldc_state(struct ldc_channel *lp);
+void ldc_set_state(struct ldc_channel *lp, u8 state);
+int ldc_mode(struct ldc_channel *lp);
+void __ldc_print(struct ldc_channel *lp, const char *caller);
+int ldc_rx_reset(struct ldc_channel *lp);
+
+#define ldc_print(chan) __ldc_print(chan, __func__)
/* Read and write operations. Only valid when the link is up. */
int ldc_write(struct ldc_channel *lp, const void *buf,
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index aebeb88f70db..e8a4c413a1c7 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
void mdesc_release(struct mdesc_handle *);
#define MDESC_NODE_NULL (~(u64)0)
+#define MDESC_MAX_STR_LEN 256
u64 mdesc_node_by_name(struct mdesc_handle *handle,
u64 from_node, const char *name);
@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
void mdesc_update(void);
struct mdesc_notifier_client {
- void (*add)(struct mdesc_handle *handle, u64 node);
- void (*remove)(struct mdesc_handle *handle, u64 node);
-
+ void (*add)(struct mdesc_handle *handle, u64 node,
+ const char *node_name);
+ void (*remove)(struct mdesc_handle *handle, u64 node,
+ const char *node_name);
const char *node_name;
struct mdesc_notifier_client *next;
};
void mdesc_register_notifier(struct mdesc_notifier_client *client);
+union md_node_info {
+ struct vdev_port {
+ u64 id; /* id */
+ u64 parent_cfg_hdl; /* parent config handle */
+ const char *name; /* name (property) */
+ } vdev_port;
+ struct ds_port {
+ u64 id; /* id */
+ } ds_port;
+};
+
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+ union md_node_info *node_info);
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+ const char *node_name, union md_node_info *node_info);
+
void mdesc_fill_in_cpu_data(cpumask_t *mask);
void mdesc_populate_present_mask(cpumask_t *mask);
void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
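
The new mdesc_get_node()/mdesc_get_node_info() interface lets a driver re-find "its" node across machine-description updates by matching on the properties captured in union md_node_info rather than on a raw node index. A hedged sketch of how a caller might use it, with error handling trimmed and the function name being illustrative (mdesc_grab(), mdesc_release(), mdesc_get_node() and MDESC_NODE_NULL are the interfaces declared above):

#include <asm/mdesc.h>

/* Re-locate a virtual-device-port node after an MD update.  'saved'
 * holds the id/name/parent handle captured when the device was first
 * probed (e.g. via mdesc_get_node_info()).
 */
static u64 relocate_vdev_port(union md_node_info *saved)
{
	struct mdesc_handle *hp;
	u64 node;

	hp = mdesc_grab();		/* pin the current MD snapshot */
	if (!hp)
		return MDESC_NODE_NULL;

	node = mdesc_get_node(hp, "virtual-device-port", saved);

	mdesc_release(hp);		/* drop the reference */
	return node;			/* MDESC_NODE_NULL if no match */
}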
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
#define CTX_VALID(__ctx) \
(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
- int cpu;
+ int cpu = smp_processor_id();
+ per_cpu(per_cpu_secondary_mm, cpu) = mm;
if (unlikely(mm == &init_mm))
return;
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
* for the first time, we must flush that context out of the
* local TLB.
*/
- cpu = smp_processor_id();
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpumask_set_cpu(cpu, mm_cpumask(mm));
__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
}
#define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&mm->context.lock, flags);
- if (!CTX_VALID(mm->context))
- get_new_mmu_context(mm);
- cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_set_cpu(cpu, mm_cpumask(mm));
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index 26ad2b2607c6..284eac3ffaf2 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -7,6 +7,7 @@ void nmi_adjust_hz(unsigned int new_hz);
extern atomic_t nmi_active;
+void arch_touch_nmi_watchdog(void);
void start_nmi_watchdog(void *unused);
void stop_nmi_watchdog(void *unused);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f56980aef..cf190728360b 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
#define PIL_DEVICE_IRQ 5
#define PIL_SMP_CALL_FUNC_SNGL 6
#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159819eb..b395e5620c0b 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
.current_ds = KERNEL_DS, \
}
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee9018433..f04dc5a43062 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
#include <linux/types.h>
#include <asm/fpumacro.h>
-/* Return saved PC of a blocked thread. */
struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
/* On Uniprocessor, even in RMO processes see TSO semantics */
#ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/qrwlock.h b/arch/sparc/include/asm/qrwlock.h
new file mode 100644
index 000000000000..d68a4b102100
--- /dev/null
+++ b/arch/sparc/include/asm/qrwlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QRWLOCK_H
+#define _ASM_SPARC_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_SPARC_QRWLOCK_H */
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 000000000000..5ae9a2802846
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 478bf6bb4598..8b32538084f7 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -1,5 +1,5 @@
/*
- * Just a place holder.
+ * Just a place holder.
*/
#ifndef _SPARC_SETUP_H
#define _SPARC_SETUP_H
@@ -16,7 +16,7 @@ extern char reboot_command[];
*/
extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/siginfo.h b/arch/sparc/include/asm/siginfo.h
deleted file mode 100644
index 48c34c19f810..000000000000
--- a/arch/sparc/include/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __SPARC_SIGINFO_H
-#define __SPARC_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-#ifdef CONFIG_COMPAT
-
-struct compat_siginfo;
-
-#endif /* CONFIG_COMPAT */
-
-#endif /* !(__SPARC_SIGINFO_H) */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..f7028f5e1a5a 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,216 +10,12 @@
#include <asm/processor.h>
#include <asm/barrier.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code. They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants. The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
-"1: ldstub [%1], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldub [%1], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 1b\n"
-" .previous"
- : "=&r" (tmp)
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned long result;
-
- __asm__ __volatile__(
-" ldstub [%1], %0\n"
- : "=r" (result)
- : "r" (lock)
- : "memory");
-
- return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __asm__ __volatile__(
-" stb %%g0, [%0]"
- : /* No outputs */
- : "r" (lock)
- : "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
-"1: ldstub [%2], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: rdpr %%pil, %1\n"
-" wrpr %3, %%pil\n"
-"3: ldub [%2], %0\n"
-" brnz,pt %0, 3b\n"
-" nop\n"
-" ba,pt %%xcc, 1b\n"
-" wrpr %1, %%pil\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r"(lock), "r"(flags)
- : "memory");
-}
-
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,pn %0, 2f\n"
-"4: add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldsw [%2], %0\n"
-" brlz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
- int tmp1, tmp2;
-
- __asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,a,pn %0, 2f\n"
-" mov 0, %0\n"
-" add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" mov 1, %0\n"
-"2:"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-
- return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" sub %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%xcc, 1b\n"
-" nop"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
- unsigned long mask, tmp1, tmp2;
-
- mask = 0x80000000UL;
-
- __asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" brnz,pn %0, 2f\n"
-"4: or %0, %3, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: lduw [%2], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock), "r" (mask)
- : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
- __asm__ __volatile__(
-" stw %%g0, [%0]"
- : /* no outputs */
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
- unsigned long mask, tmp1, tmp2, result;
-
- mask = 0x80000000UL;
-
- __asm__ __volatile__(
-" mov 0, %2\n"
-"1: lduw [%3], %0\n"
-" brnz,pn %0, 2f\n"
-" or %0, %4, %1\n"
-" cas [%3], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" mov 1, %2\n"
-"2:"
- : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
- : "r" (lock), "r" (mask)
- : "memory");
-
- return result;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
-#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 9c454fdeaad8..bce8ef44dfa9 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -1,20 +1,24 @@
#ifndef __SPARC_SPINLOCK_TYPES_H
#define __SPARC_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
typedef struct {
volatile unsigned char lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+#else
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
+#endif /* CONFIG_QUEUED_RWLOCKS */
#endif
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index fce415034000..51bc3bc54bfe 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -9,7 +9,12 @@
#include <linux/types.h>
#include <linux/init.h>
+/* The most frequently accessed fields should be first,
+ * to fit into the same cacheline.
+ */
struct sparc64_tick_ops {
+ unsigned long ticks_per_nsec_quotient;
+ unsigned long offset;
unsigned long long (*get_tick)(void);
int (*add_compare)(unsigned long);
unsigned long softint_mask;
@@ -17,6 +22,8 @@ struct sparc64_tick_ops {
void (*init_tick)(void);
unsigned long (*add_tick)(unsigned long);
+ unsigned long (*get_frequency)(void);
+ unsigned long frequency;
char *name;
};
@@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu);
void setup_sparc64_timer(void);
void __init time_init(void);
+#define TICK_PRIV_BIT BIT(63)
+#define TICKCMP_IRQ_BIT BIT(63)
+
+#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
+#define HBIRD_STICK_ADDR 0x1fe0000f070UL
+
+#define GET_TICK_NINSTR 13
+struct get_tick_patch {
+ unsigned int addr;
+ unsigned int tick[GET_TICK_NINSTR];
+ unsigned int stick[GET_TICK_NINSTR];
+};
+
+extern struct get_tick_patch __get_tick_patch;
+extern struct get_tick_patch __get_tick_patch_end;
+
+static inline unsigned long get_tick(void)
+{
+ unsigned long tick, tmp1, tmp2;
+
+ __asm__ __volatile__(
+ /* read hbtick 13 instructions */
+ "661:\n"
+ " mov 0x1fe, %1\n"
+ " sllx %1, 0x20, %1\n"
+ " sethi %%hi(0xf000), %2\n"
+ " or %2, 0x70, %2\n"
+ " or %1, %2, %1\n" /* %1 = HBIRD_STICK_ADDR */
+ " add %1, 8, %2\n"
+ " ldxa [%2]%3, %0\n"
+ " ldxa [%1]%3, %1\n"
+ " ldxa [%2]%3, %2\n"
+ " sub %2, %0, %0\n" /* don't modify %xcc */
+ " brnz,pn %0, 661b\n" /* restart to save one register */
+ " sllx %2, 32, %2\n"
+ " or %2, %1, %0\n"
+ /* Common/not patched code */
+ " sllx %0, 1, %0\n"
+ " srlx %0, 1, %0\n" /* Clear TICK_PRIV_BIT */
+ /* Beginning of patch section */
+ " .section .get_tick_patch, \"ax\"\n"
+ " .word 661b\n"
+ /* read tick 2 instructions and 11 skipped */
+ " ba 1f\n"
+ " rd %%tick, %0\n"
+ " .skip 4 * (%4 - 2)\n"
+ "1:\n"
+ /* read stick 2 instructions and 11 skipped */
+ " ba 1f\n"
+ " rd %%asr24, %0\n"
+ " .skip 4 * (%4 - 2)\n"
+ "1:\n"
+ /* End of patch section */
+ " .previous\n"
+ : "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2)
+ : "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR));
+
+ return tick;
+}
+
#endif /* _SPARC64_TIMER_H */
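
Caching ticks_per_nsec_quotient and offset inside sparc64_tick_ops keeps the data the clock fast path reads in the same cacheline as the ops, per the comment above. A sketch of the fixed-point conversion such a precomputed quotient enables; the shift constant and helper name here are illustrative, and the real arithmetic lives in the time_64.c part of this series:

#define TICKS_TO_NS_SHIFT	10UL	/* illustrative fixed-point shift */

/* quotient is precomputed once as (NSEC_PER_SEC << TICKS_TO_NS_SHIFT) / freq,
 * so converting a tick count to nanoseconds is one multiply and one shift;
 * a boot-time offset can then be subtracted so the clock starts near zero.
 */
static inline unsigned long long ticks_to_ns(unsigned long long ticks,
					     unsigned long quotient,
					     unsigned long offset)
{
	return ((ticks * quotient) >> TICKS_TO_NS_SHIFT) - offset;
}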
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 12ebee2d97c7..bdb1447aa1bb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -277,7 +277,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
-__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASM_UACCESS_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b5f976ee7510..6d4c997d1a9e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -193,7 +193,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
-__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
struct pt_regs;
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..d1c47e9f0090 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -316,23 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
}
#define VIO_MAX_TYPE_LEN 32
+#define VIO_MAX_NAME_LEN 32
#define VIO_MAX_COMPAT_LEN 64
struct vio_dev {
u64 mp;
struct device_node *dp;
+ char node_name[VIO_MAX_NAME_LEN];
char type[VIO_MAX_TYPE_LEN];
char compat[VIO_MAX_COMPAT_LEN];
int compat_len;
u64 dev_no;
+ unsigned long port_id;
unsigned long channel_id;
unsigned int tx_irq;
unsigned int rx_irq;
u64 rx_ino;
+ u64 tx_ino;
+
+ /* Handle to the root of "channel-devices" sub-tree in MDESC */
+ u64 cdev_handle;
+
+ /* MD specific data used to identify the vdev in MD */
+ union md_node_info md_node_info;
struct device dev;
};
@@ -346,6 +356,7 @@ struct vio_driver {
void (*shutdown)(struct vio_dev *dev);
unsigned long driver_data;
struct device_driver driver;
+ bool no_irq;
};
struct vio_version {
@@ -489,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
void vio_port_up(struct vio_driver_state *vio);
int vio_set_intr(unsigned long dev_ino, int state);
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev);
#endif /* _SPARC64_VIO_H */
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index b15bf6bc0e94..2178c78c7c1a 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += types.h
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h
index 06b3f6c3bb9a..6d27398632ea 100644
--- a/arch/sparc/include/uapi/asm/ioctls.h
+++ b/arch/sparc/include/uapi/asm/ioctls.h
@@ -27,7 +27,7 @@
#define TIOCGRS485 _IOR('T', 0x41, struct serial_rs485)
#define TIOCSRS485 _IOWR('T', 0x42, struct serial_rs485)
-/* Note that all the ioctls that are not available in Linux have a
+/* Note that all the ioctls that are not available in Linux have a
* double underscore on the front to: a) avoid some programs to
* think we support some ioctls under Linux (autoconfiguration stuff)
*/
@@ -88,6 +88,7 @@
#define TIOCGPTN _IOR('t', 134, unsigned int) /* Get Pty Number */
#define TIOCSPTLCK _IOW('t', 135, int) /* Lock/unlock PTY */
#define TIOCSIG _IOW('t', 136, int) /* Generate signal on Pty slave */
+#define TIOCGPTPEER _IOR('t', 137, int) /* Safely open the slave */
/* Little f */
#define FIOCLEX _IO('f', 1)
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 3f4ad19d9ec7..186fd8199f54 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -94,6 +94,10 @@
#define SO_COOKIE 0x003b
+#define SCM_TIMESTAMPING_PKTINFO 0x003c
+
+#define SO_PEERGROUPS 0x003d
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 9ebc37e7d64c..c988e7fa069b 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id apc_match[] = {
+static const struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
- strcpy(pbuf.req.svc_id, cp->service_id);
+ strcpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff698069b..cec54dc4ab81 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,17 +130,16 @@ unsigned long prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
- if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
- frame_pointer, NULL) == -EBUSY)
- return parent + 8UL;
-
trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (!ftrace_graph_entry(&trace))
+ return parent + 8UL;
+
+ if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+ frame_pointer, NULL) == -EBUSY)
return parent + 8UL;
- }
return return_hooker;
}
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index c63ba99ca551..fcbcc031f615 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -314,7 +314,7 @@ bad:
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
@@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -573,7 +573,7 @@ iommu_map_failed:
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -741,6 +741,26 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
+static int dma_4u_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+
+ if (device_mask > DMA_BIT_MASK(32))
+ return 0;
+ if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
+ return 1;
+#ifdef CONFIG_PCI
+ if (dev_is_pci(dev))
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+#endif
+ return 0;
+}
+
static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
@@ -750,31 +770,9 @@ static const struct dma_map_ops sun4u_dma_ops = {
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ .dma_supported = dma_4u_supported,
+ .mapping_error = dma_4u_mapping_error,
};
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-int dma_supported(struct device *dev, u64 device_mask)
-{
- struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask = iommu->dma_addr_mask;
-
- if (device_mask > DMA_BIT_MASK(32)) {
- if (iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- return 0;
- }
-
- if ((device_mask & dma_addr_mask) == dma_addr_mask)
- return 1;
-
-#ifdef CONFIG_PCI
- if (dev_is_pci(dev))
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
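
With dma_supported and mapping_error now supplied through dma_map_ops (and SPARC_MAPPING_ERROR replacing the removed DMA_ERROR_CODE), drivers keep using the generic DMA API unchanged; the checks simply dispatch into the ops installed above. A minimal driver-side sketch (function name illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map one buffer for device reads; failures surface through
 * dma_mapping_error(), which invokes the ops' .mapping_error hook
 * (dma_4u_mapping_error() on sun4u) instead of comparing against a
 * global DMA_ERROR_CODE.
 */
static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}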
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 828493329f68..5ea5c192b1d9 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -47,4 +47,6 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
+#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0)
+
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cf20033a1458..12894f259bea 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -401,6 +401,11 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG();
}
+static int sbus_dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
static const struct dma_map_ops sbus_dma_ops = {
.alloc = sbus_alloc_coherent,
.free = sbus_free_coherent,
@@ -410,6 +415,7 @@ static const struct dma_map_ops sbus_dma_ops = {
.unmap_sg = sbus_unmap_sg,
.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
.sync_sg_for_device = sbus_sync_sg_for_device,
+ .dma_supported = sbus_dma_supported,
};
static int __init sparc_register_ioport(void)
@@ -637,6 +643,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
}
}
+/* note: leon re-uses pci32_dma_ops */
const struct dma_map_ops pci32_dma_ops = {
.alloc = pci32_alloc_coherent,
.free = pci32_free_coherent,
@@ -651,29 +658,9 @@ const struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-/* leon re-uses pci32_dma_ops */
-const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
-EXPORT_SYMBOL(leon_dma_ops);
-
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-/*
- * Return whether the given PCI device DMA address mask can be
- * supported properly. For example, if your device can only drive the
- * low 24-bits during PCI bus mastering, then you would pass
- * 0x00ffffff as the mask to this function.
- */
-int dma_supported(struct device *dev, u64 mask)
-{
- if (dev_is_pci(dev))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+ /* Make sure mondo block is 64-byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
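
kzalloc() does not guarantee 64-byte alignment, so the mondo block above is carved out of a 127-byte allocation by rounding the pointer up to the next 64-byte boundary. The same trick in isolation (a sketch; the helper name is illustrative, and note that the raw pointer, not the aligned one, is what must eventually be freed):

#include <linux/slab.h>

/* Return a 64-byte-aligned, zeroed 64-byte block.  *raw receives the
 * allocation itself, which is what kfree() must later be given.
 */
static void *alloc_aligned64(void **raw)
{
	void *p = kzalloc(64 + 63, GFP_KERNEL);		/* 127 bytes */

	*raw = p;
	if (!p)
		return NULL;
	return (void *)(((unsigned long)p + 63) & ~0x3fUL);
}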
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9e9ce0..93adde1ac166 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
val = 0x01000000;
}
- get_online_cpus();
mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
mutex_unlock(&text_mutex);
- put_online_cpus();
}
#endif
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..b625db4cfb78 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
/* smp_64.c */
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
@@ -53,6 +52,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
void do_signal32(struct pt_regs * regs);
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
+/* time_64.c */
+void __init time_init_early(void);
+
/* compat_audit.c */
extern unsigned int sparc32_dir_class[];
extern unsigned int sparc32_chattr_class[];
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d503866431..840e0b21bfe3 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -34,7 +34,6 @@
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
@@ -178,6 +177,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
+#define LDC_ABORT(lp) ldc_abort((lp), __func__)
+
static const char *state_to_str(u8 state)
{
switch (state) {
@@ -196,15 +197,6 @@ static const char *state_to_str(u8 state)
}
}
-static void ldc_set_state(struct ldc_channel *lp, u8 state)
-{
- ldcdbg(STATE, "STATE (%s) --> (%s)\n",
- state_to_str(lp->state),
- state_to_str(state));
-
- lp->state = state;
-}
-
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
@@ -516,11 +508,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
return err;
}
-static int ldc_abort(struct ldc_channel *lp)
+static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
unsigned long hv_err;
- ldcdbg(STATE, "ABORT\n");
+ ldcdbg(STATE, "ABORT[%s]\n", msg);
+ ldc_print(lp);
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
@@ -605,7 +598,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
}
}
if (err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -618,13 +611,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -635,17 +628,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
vap = find_by_major(vp->major);
if (!vap)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return send_tx_packet(lp, p, new_tail);
}
@@ -668,7 +661,7 @@ static int process_version(struct ldc_channel *lp,
return process_ver_nack(lp, vp);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
@@ -681,13 +674,13 @@ static int process_rts(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -700,7 +693,7 @@ static int process_rtr(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
@@ -723,7 +716,7 @@ static int process_rdx(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rcv_nxt = p->seqid;
@@ -750,14 +743,14 @@ static int process_control_frame(struct ldc_channel *lp,
return process_rdx(lp, p);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
static int process_data_ack(struct ldc_channel *lp,
@@ -776,7 +769,7 @@ static int process_data_ack(struct ldc_channel *lp,
return 0;
}
if (head == lp->tx_tail)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
return 0;
@@ -820,16 +813,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
-
- orig_state = lp->chan_state;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
- (void) __set_rx_head(lp, lp->rx_tail);
+ (void) ldc_rx_reset(lp);
goto out;
}
@@ -880,7 +878,7 @@ handshake_complete:
break;
default:
- err = ldc_abort(lp);
+ err = LDC_ABORT(lp);
break;
}
@@ -895,7 +893,7 @@ handshake_complete:
err = __set_rx_head(lp, new);
if (err < 0) {
- (void) ldc_abort(lp);
+ (void) LDC_ABORT(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
@@ -936,7 +934,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1342,6 +1347,14 @@ int ldc_bind(struct ldc_channel *lp)
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
+ if (lp->cfg.mode == LDC_MODE_RAW) {
+ /*
+ * There is no handshake in RAW mode, so handshake
+ * is completed.
+ */
+ lp->hs_state = LDC_HS_COMPLETE;
+ }
+
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1447,12 +1460,54 @@ int ldc_state(struct ldc_channel *lp)
}
EXPORT_SYMBOL(ldc_state);
+void ldc_set_state(struct ldc_channel *lp, u8 state)
+{
+ ldcdbg(STATE, "STATE (%s) --> (%s)\n",
+ state_to_str(lp->state),
+ state_to_str(state));
+
+ lp->state = state;
+}
+EXPORT_SYMBOL(ldc_set_state);
+
+int ldc_mode(struct ldc_channel *lp)
+{
+ return lp->cfg.mode;
+}
+EXPORT_SYMBOL(ldc_mode);
+
+int ldc_rx_reset(struct ldc_channel *lp)
+{
+ return __set_rx_head(lp, lp->rx_tail);
+}
+
+void __ldc_print(struct ldc_channel *lp, const char *caller)
+{
+ pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
+ "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
+ "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
+ "\trcv_nxt=%u snd_nxt=%u\n",
+ caller, lp->id, lp->flags, state_to_str(lp->state),
+ lp->chan_state, lp->hs_state,
+ lp->rx_head, lp->rx_tail, lp->rx_num_entries,
+ lp->tx_head, lp->tx_tail, lp->tx_num_entries,
+ lp->rcv_nxt, lp->snd_nxt);
+}
+
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
- unsigned long new_tail;
+ unsigned long new_tail, hv_err;
int err;
+ hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
+ &lp->chan_state);
+ if (unlikely(hv_err))
+ return -EBUSY;
+
+ if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
+ return LDC_ABORT(lp);
+
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
@@ -1483,7 +1538,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1526,7 +1581,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf,
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
@@ -1592,9 +1647,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
if (err)
return err;
- err = __set_rx_head(lp, lp->rx_tail);
+ err = ldc_rx_reset(lp);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1607,7 +1662,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
return err;
}
if (p->stype & LDC_NACK)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1627,7 +1682,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1650,7 +1705,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head)
int err = __set_rx_head(lp, head);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rx_head = head;
return 0;
@@ -1689,7 +1744,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1733,9 +1788,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
@@ -1900,6 +1960,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
unsigned long flags;
int err;
+ ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
+
if (!buf)
return -EINVAL;
@@ -1915,6 +1977,9 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
spin_unlock_irqrestore(&lp->lock, flags);
+ ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
+ lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
+
return err;
}
EXPORT_SYMBOL(ldc_read);
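
LDC_ABORT() and the ldc_print() wrapper forward __func__ so the shared helpers can log which call site triggered them without every caller passing its own name. The pattern in isolation (names illustrative):

#include <linux/printk.h>

/* The helper takes the caller's name; the macro supplies it automatically. */
static void __report_reset(int chan_id, const char *caller)
{
	pr_info("channel %d reset (requested by %s)\n", chan_id, caller);
}

#define report_reset(chan_id)	__report_reset((chan_id), __func__)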
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index c0765bbf60ea..fa466ce45bc9 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -75,6 +75,74 @@ struct mdesc_handle {
struct mdesc_hdr mdesc;
};
+typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
+ union md_node_info *);
+typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
+typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);
+
+struct md_node_ops {
+ char *name;
+ mdesc_node_info_get_f get_info;
+ mdesc_node_info_rel_f rel_info;
+ mdesc_node_match_f node_match;
+};
+
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_vdev_port_node_info(union md_node_info *node_info);
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_ds_port_node_info(union md_node_info *node_info);
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+/* supported node types which can be registered */
+static struct md_node_ops md_node_ops_table[] = {
+ {"virtual-device-port", get_vdev_port_node_info,
+ rel_vdev_port_node_info, vdev_port_node_match},
+ {"domain-services-port", get_ds_port_node_info,
+ rel_ds_port_node_info, ds_port_node_match},
+ {NULL, NULL, NULL, NULL}
+};
+
+static void mdesc_get_node_ops(const char *node_name,
+ mdesc_node_info_get_f *get_info_f,
+ mdesc_node_info_rel_f *rel_info_f,
+ mdesc_node_match_f *match_f)
+{
+ int i;
+
+ if (get_info_f)
+ *get_info_f = NULL;
+
+ if (rel_info_f)
+ *rel_info_f = NULL;
+
+ if (match_f)
+ *match_f = NULL;
+
+ if (!node_name)
+ return;
+
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
+ if (get_info_f)
+ *get_info_f = md_node_ops_table[i].get_info;
+
+ if (rel_info_f)
+ *rel_info_f = md_node_ops_table[i].rel_info;
+
+ if (match_f)
+ *match_f = md_node_ops_table[i].node_match;
+
+ break;
+ }
+ }
+}
+
static void mdesc_handle_init(struct mdesc_handle *hp,
unsigned int handle_size,
void *base)
@@ -137,12 +205,10 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!base)
+ return NULL;
- /*
- * Allocation has to succeed because mdesc update would be missed
- * and such events are not retransmitted.
- */
- base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
addr = (unsigned long)base;
addr = (addr + 15UL) & ~15UL;
hp = (struct mdesc_handle *) addr;
@@ -218,14 +284,31 @@ static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
+ bool supported = false;
u64 node;
+ int i;
mutex_lock(&mdesc_mutex);
+
+ /* check to see if the node is supported for registration */
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
+ supported = true;
+ break;
+ }
+ }
+
+ if (!supported) {
+ pr_err("MD: %s node not supported\n", client->node_name);
+ mutex_unlock(&mdesc_mutex);
+ return;
+ }
+
client->next = client_list;
client_list = client;
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
- client->add(cur_mdesc, node);
+ client->add(cur_mdesc, node, client->node_name);
mutex_unlock(&mdesc_mutex);
}
@@ -249,59 +332,145 @@ static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
return id;
}
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *parent_cfg_hdlp;
+ const char *name;
+ const u64 *idp;
+
+ /*
+ * Virtual device nodes are distinguished by:
+ * 1. "id" property
+ * 2. "name" property
+ * 3. parent node "cfg-handle" property
+ */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ name = mdesc_get_property(md, node, "name", NULL);
+ parent_cfg_hdlp = parent_cfg_handle(md, node);
+
+ if (!idp || !name || !parent_cfg_hdlp)
+ return -1;
+
+ node_info->vdev_port.id = *idp;
+ node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
+ node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
+
+ return 0;
+}
+
+static void rel_vdev_port_node_info(union md_node_info *node_info)
+{
+ if (node_info && node_info->vdev_port.name) {
+ kfree_const(node_info->vdev_port.name);
+ node_info->vdev_port.name = NULL;
+ }
+}
+
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
+ return false;
+
+ if (a_node_info->vdev_port.parent_cfg_hdl !=
+ b_node_info->vdev_port.parent_cfg_hdl)
+ return false;
+
+ if (strncmp(a_node_info->vdev_port.name,
+ b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
+ return false;
+
+ return true;
+}
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *idp;
+
+ /* DS port nodes use the "id" property to distinguish them */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ if (!idp)
+ return -1;
+
+ node_info->ds_port.id = *idp;
+
+ return 0;
+}
+
+static void rel_ds_port_node_info(union md_node_info *node_info)
+{
+}
+
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->ds_port.id != b_node_info->ds_port.id)
+ return false;
+
+ return true;
+}
+
/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
struct mdesc_handle *a,
struct mdesc_handle *b,
- void (*func)(struct mdesc_handle *, u64))
+ void (*func)(struct mdesc_handle *, u64,
+ const char *node_name))
{
- u64 node;
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info a_node_info;
+ union md_node_info b_node_info;
+ bool found;
+ u64 a_node;
+ u64 b_node;
+ int rv;
- mdesc_for_each_node_by_name(a, node, name) {
- int found = 0, is_vdc_port = 0;
- const char *name_prop;
- const u64 *id;
- u64 fnode;
-
- name_prop = mdesc_get_property(a, node, "name", NULL);
- if (name_prop && !strcmp(name_prop, "vdc-port")) {
- is_vdc_port = 1;
- id = parent_cfg_handle(a, node);
- } else
- id = mdesc_get_property(a, node, "id", NULL);
-
- if (!id) {
- printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
- (name_prop ? name_prop : name));
+ /*
+ * Find the get_info, rel_info and node_match ops for the given
+ * node name
+ */
+ mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
+ &node_match_func);
+
+ /* If we didn't find a match, the node type is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node type is not supported\n", name);
+ return;
+ }
+
+ mdesc_for_each_node_by_name(a, a_node, name) {
+ found = false;
+
+ rv = get_info_func(a, a_node, &a_node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ name);
continue;
}
- mdesc_for_each_node_by_name(b, fnode, name) {
- const u64 *fid;
-
- if (is_vdc_port) {
- name_prop = mdesc_get_property(b, fnode,
- "name", NULL);
- if (!name_prop ||
- strcmp(name_prop, "vdc-port"))
- continue;
- fid = parent_cfg_handle(b, fnode);
- if (!fid) {
- printk(KERN_ERR "MD: Cannot find ID "
- "for vdc-port node.\n");
- continue;
- }
- } else
- fid = mdesc_get_property(b, fnode,
- "id", NULL);
-
- if (*id == *fid) {
- found = 1;
+ /* Check each node in B for node matching a_node */
+ mdesc_for_each_node_by_name(b, b_node, name) {
+ rv = get_info_func(b, b_node, &b_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(&a_node_info, &b_node_info)) {
+ found = true;
+ rel_info_func(&b_node_info);
break;
}
+
+ rel_info_func(&b_node_info);
}
+
+ rel_info_func(&a_node_info);
+
if (!found)
- func(a, node);
+ func(a, a_node, name);
}
}
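
The rewritten invoke_on_missing() drives the comparison entirely through the per-node-type callbacks resolved by mdesc_get_node_ops(): fetch the match info for each node of A, scan B for a node whose info matches, and call func on the A nodes that have no counterpart. A standalone sketch of that pattern, with the node info reduced to plain integers (illustrative, not the kernel structures):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* stand-in for the per-node-type match op resolved by mdesc_get_node_ops() */
    static bool match(int a, int b) { return a == b; }

    static void invoke_on_missing(const int *a, size_t a_len,
                                  const int *b, size_t b_len,
                                  void (*func)(int))
    {
        size_t i, j;

        for (i = 0; i < a_len; i++) {
            bool found = false;

            for (j = 0; j < b_len; j++) {
                if (match(a[i], b[j])) {
                    found = true;
                    break;
                }
            }
            if (!found)
                func(a[i]);     /* element of A with no counterpart in B */
        }
    }

    static void removed(int key) { printf("missing from B: %d\n", key); }

    int main(void)
    {
        int a[] = { 1, 2, 3 }, b[] = { 2, 3, 4 };

        invoke_on_missing(a, 3, b, 3, removed);  /* prints: missing from B: 1 */
        return 0;
    }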
@@ -367,6 +536,76 @@ out:
mutex_unlock(&mdesc_mutex);
}
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+ union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info hp_node_info;
+ u64 hp_node;
+ int rv;
+
+ if (hp == NULL || node_name == NULL || node_info == NULL)
+ return MDESC_NODE_NULL;
+
+ /* Find the ops for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
+ &node_match_func);
+
+ /* If we didn't find ops for the given node name, it is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ mdesc_for_each_node_by_name(hp, hp_node, node_name) {
+ rv = get_info_func(hp, hp_node, &hp_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(node_info, &hp_node_info))
+ break;
+
+ rel_info_func(&hp_node_info);
+ }
+
+ rel_info_func(&hp_node_info);
+
+ return hp_node;
+}
+EXPORT_SYMBOL(mdesc_get_node);
+
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+ const char *node_name, union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ int rv;
+
+ if (hp == NULL || node == MDESC_NODE_NULL ||
+ node_name == NULL || node_info == NULL)
+ return -EINVAL;
+
+ /* Find the get_info op for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);
+
+ /* If we didn't find a get_info_func, the node name is not supported */
+ if (get_info_func == NULL) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ rv = get_info_func(hp, node, node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ node_name);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mdesc_get_node_info);
+
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) (mdesc + 1);
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 95e73c63c99d..048ad783ea3f 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -51,7 +51,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
-void touch_nmi_watchdog(void)
+void arch_touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
@@ -61,10 +61,8 @@ void touch_nmi_watchdog(void)
per_cpu(nmi_touch, cpu) = 1;
}
}
-
- touch_softlockup_watchdog();
}
-EXPORT_SYMBOL(touch_nmi_watchdog);
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 68bec7c97cb8..24f21c726dfa 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -24,6 +24,7 @@
#include "pci_impl.h"
#include "iommu_common.h"
+#include "kernel.h"
#include "pci_sun4v.h"
@@ -412,12 +413,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
@@ -590,7 +591,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -607,7 +608,7 @@ iommu_map_failed:
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -669,6 +670,26 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
local_irq_restore(flags);
}
+static int dma_4v_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+ u64 dma_addr_mask;
+
+ if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ dma_addr_mask = iommu->dma_addr_mask;
+
+ if ((device_mask & dma_addr_mask) == dma_addr_mask)
+ return 1;
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+}
+
+static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
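
dma_4v_supported() accepts a device mask only if it covers every bit of the IOMMU's (or, above 32 bits with an ATU present, the ATU's) address mask, and otherwise falls back to pci64_dma_supported(). A standalone sketch of the subset test, using a DMA_BIT_MASK()-style helper and an illustrative 32-bit IOMMU mask:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same construction as the kernel's DMA_BIT_MASK(n) */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static bool mask_supported(uint64_t device_mask, uint64_t dma_addr_mask)
    {
        /* the device mask must cover every address bit the IOMMU may produce */
        return (device_mask & dma_addr_mask) == dma_addr_mask;
    }

    int main(void)
    {
        uint64_t iommu_mask = DMA_BIT_MASK(32);     /* illustrative */

        printf("%d\n", mask_supported(DMA_BIT_MASK(40), iommu_mask)); /* 1 */
        printf("%d\n", mask_supported(DMA_BIT_MASK(24), iommu_mask)); /* 0 */
        return 0;
    }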
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
@@ -676,6 +697,8 @@ static const struct dma_map_ops sun4v_dma_ops = {
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
+ .dma_supported = dma_4v_supported,
+ .mapping_error = dma_4v_mapping_error,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index f12b23f7b515..3b26cf62df6d 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -71,7 +71,7 @@ static int pmc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id pmc_match[] = {
+static const struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b6dac8e980f0..9245f93398c7 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -177,14 +177,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
}
/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return task_thread_info(tsk)->kpc;
-}
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1badc493e62e..b96104da5bd6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
#endif
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- unsigned long ret = 0xdeadbeefUL;
-
- if (ti && ti->ksp) {
- unsigned long *sp;
- sp = (unsigned long *)(ti->ksp + STACK_BIAS);
- if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
- sp[14]) {
- unsigned long *fp;
- fp = (unsigned long *)(sp[14] + STACK_BIAS);
- if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
- ret = fp[15];
- }
- }
- return ret;
-}
-
/* Free current thread data structures etc.. */
void exit_thread(struct task_struct *tsk)
{
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 20cc5d80a471..baeaeed64993 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -381,7 +381,7 @@ bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int this_cpu_id;
/* On hypervisor based platforms we interrogate the 'reg'
- * property. On everything else we look for a 'upa-portis',
+ * property. On everything else we look for a 'upa-portid',
* 'portid', or 'cpuid' property.
*/
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 422b17880955..4d9c3e13c150 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -95,7 +95,7 @@ static struct console prom_early_console = {
.index = -1,
};
-/*
+/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
@@ -365,6 +365,7 @@ void __init start_early_boot(void)
}
current_thread_info()->cpu = cpu;
+ time_init_early();
prom_init_report();
start_kernel();
}
@@ -639,7 +640,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+ rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
task_thread_info(&init_task)->kregs = &fake_swapper_regs;
@@ -648,7 +649,7 @@ void __init setup_arch(char **cmdline_p)
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
-
+
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520efc813..043544d0cda3 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 9f575dfc2e41..2ce2e7b2abbb 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -298,7 +298,7 @@ static int clock_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id clock_match[] = {
+static const struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 98d05de8da66..564f0e46ffd4 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -32,7 +32,6 @@
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
@@ -47,14 +46,13 @@
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
+#include <asm/cacheflush.h>
#include "entry.h"
+#include "kernel.h"
DEFINE_SPINLOCK(rtc_lock);
-#define TICK_PRIV_BIT (1UL << 63)
-#define TICKCMP_IRQ_BIT (1UL << 63)
-
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -164,13 +162,44 @@ static unsigned long tick_add_tick(unsigned long adj)
return new_tick;
}
-static struct sparc64_tick_ops tick_operations __read_mostly = {
+/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */
+static unsigned long cpuid_to_freq(phandle node, int cpuid)
+{
+ bool is_cpu_node = false;
+ unsigned long freq = 0;
+ char type[128];
+
+ if (!node)
+ return freq;
+
+ if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
+ is_cpu_node = (strcmp(type, "cpu") == 0);
+
+	/* try upa-portid, then cpuid, to match the cpu id; see prom_64.c */
+ if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
+ prom_getint(node, "cpuid") == cpuid))
+ freq = prom_getintdefault(node, "clock-frequency", 0);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getchild(node), cpuid);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getsibling(node), cpuid);
+
+ return freq;
+}
+
+static unsigned long tick_get_frequency(void)
+{
+ return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
+}
+
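
cpuid_to_freq() recurses depth-first through the OpenBoot tree, child before sibling, until it reaches the "cpu" node whose upa-portid or cpuid matches, and returns its clock-frequency. A standalone sketch of the same child-then-sibling recursion over a toy tree (node layout and frequency values illustrative):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct node {
        const char *device_type;
        int cpuid;                  /* -1 if not a cpu node */
        unsigned long clock_freq;
        struct node *child, *sibling;
    };

    /* child first, then sibling -- the same shape as cpuid_to_freq() */
    static unsigned long cpuid_to_freq(struct node *n, int cpuid)
    {
        unsigned long freq = 0;

        if (!n)
            return 0;

        if (n->device_type && strcmp(n->device_type, "cpu") == 0 &&
            n->cpuid == cpuid)
            freq = n->clock_freq;
        if (!freq)
            freq = cpuid_to_freq(n->child, cpuid);
        if (!freq)
            freq = cpuid_to_freq(n->sibling, cpuid);

        return freq;
    }

    int main(void)
    {
        struct node cpu1 = { "cpu", 1, 1200000000UL, NULL, NULL };
        struct node cpu0 = { "cpu", 0, 1000000000UL, NULL, &cpu1 };
        struct node root = { NULL, -1, 0, &cpu0, NULL };

        printf("%lu\n", cpuid_to_freq(&root, 1));   /* 1200000000 */
        return 0;
    }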
+static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
.name = "tick",
.init_tick = tick_init_tick,
.disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
+ .get_frequency = tick_get_frequency,
.softint_mask = 1UL << 0,
};
@@ -250,6 +279,11 @@ static int stick_add_compare(unsigned long adj)
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
+static unsigned long stick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops stick_operations __read_mostly = {
.name = "stick",
.init_tick = stick_init_tick,
@@ -257,6 +291,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
.get_tick = stick_get_tick,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
+ .get_frequency = stick_get_frequency,
.softint_mask = 1UL << 16,
};
@@ -277,9 +312,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
* 2) write high
* 3) write low
*/
-#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
-#define HBIRD_STICK_ADDR 0x1fe0000f070UL
-
static unsigned long __hbird_read_stick(void)
{
unsigned long ret, tmp1, tmp2, tmp3;
@@ -381,6 +413,11 @@ static int hbtick_add_compare(unsigned long adj)
return ((long)(val2 - val)) > 0L;
}
+static unsigned long hbtick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.name = "hbtick",
.init_tick = hbtick_init_tick,
@@ -388,11 +425,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.get_tick = hbtick_get_tick,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
+ .get_frequency = hbtick_get_frequency,
.softint_mask = 1UL << 0,
};
-static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-
unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);
@@ -582,34 +618,17 @@ static int __init clock_init(void)
*/
fs_initcall(clock_init);
-/* This is gets the master TICK_INT timer going. */
-static unsigned long sparc64_init_timers(void)
+/* Return true if this is Hummingbird, aka Ultra-IIe */
+static bool is_hummingbird(void)
{
- struct device_node *dp;
- unsigned long freq;
+ unsigned long ver, manuf, impl;
- dp = of_find_node_by_path("/");
- if (tlb_type == spitfire) {
- unsigned long ver, manuf, impl;
-
- __asm__ __volatile__ ("rdpr %%ver, %0"
- : "=&r" (ver));
- manuf = ((ver >> 48) & 0xffff);
- impl = ((ver >> 32) & 0xffff);
- if (manuf == 0x17 && impl == 0x13) {
- /* Hummingbird, aka Ultra-IIe */
- tick_ops = &hbtick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- } else {
- tick_ops = &tick_operations;
- freq = local_cpu_data().clock_tick;
- }
- } else {
- tick_ops = &stick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- }
+ __asm__ __volatile__ ("rdpr %%ver, %0"
+ : "=&r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
- return freq;
+ return (manuf == 0x17 && impl == 0x13);
}
struct freq_table {
@@ -671,12 +690,12 @@ core_initcall(register_sparc64_cpufreq_notifier);
static int sparc64_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- return tick_ops->add_compare(delta) ? -ETIME : 0;
+ return tick_operations.add_compare(delta) ? -ETIME : 0;
}
static int sparc64_timer_shutdown(struct clock_event_device *evt)
{
- tick_ops->disable_irq();
+ tick_operations.disable_irq();
return 0;
}
@@ -693,7 +712,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned long tick_mask = tick_ops->softint_mask;
+ unsigned long tick_mask = tick_operations.softint_mask;
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
@@ -728,7 +747,7 @@ void setup_sparc64_timer(void)
: "=r" (pstate)
: "i" (PSTATE_IE));
- tick_ops->init_tick();
+ tick_operations.init_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -755,12 +774,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
- unsigned long bclock, now;
+ unsigned long bclock = get_tick();
- bclock = tick_ops->get_tick();
- do {
- now = tick_ops->get_tick();
- } while ((now-bclock) < loops);
+ while ((get_tick() - bclock) < loops)
+ ;
}
EXPORT_SYMBOL(__delay);
@@ -772,26 +789,71 @@ EXPORT_SYMBOL(udelay);
static u64 clocksource_tick_read(struct clocksource *cs)
{
- return tick_ops->get_tick();
+ return get_tick();
+}
+
+static void __init get_tick_patch(void)
+{
+ unsigned int *addr, *instr, i;
+ struct get_tick_patch *p;
+
+ if (tlb_type == spitfire && is_hummingbird())
+ return;
+
+ for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
+ instr = (tlb_type == spitfire) ? p->tick : p->stick;
+ addr = (unsigned int *)(unsigned long)p->addr;
+ for (i = 0; i < GET_TICK_NINSTR; i++) {
+ addr[i] = instr[i];
+ /* ensure that address is modified before flush */
+ wmb();
+ flushi(&addr[i]);
+ }
+ }
+}
+
+static void init_tick_ops(struct sparc64_tick_ops *ops)
+{
+ unsigned long freq, quotient, tick;
+
+ freq = ops->get_frequency();
+ quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
+ tick = ops->get_tick();
+
+ ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
+ ops->ticks_per_nsec_quotient = quotient;
+ ops->frequency = freq;
+ tick_operations = *ops;
+ get_tick_patch();
+}
+
+void __init time_init_early(void)
+{
+ if (tlb_type == spitfire) {
+ if (is_hummingbird())
+ init_tick_ops(&hbtick_operations);
+ else
+ init_tick_ops(&tick_operations);
+ } else {
+ init_tick_ops(&stick_operations);
+ }
}
void __init time_init(void)
{
- unsigned long freq = sparc64_init_timers();
+ unsigned long freq;
+ freq = tick_operations.frequency;
tb_ticks_per_usec = freq / USEC_PER_SEC;
- timer_ticks_per_nsec_quotient =
- clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
-
- clocksource_tick.name = tick_ops->name;
+ clocksource_tick.name = tick_operations.name;
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- sparc64_clockevent.name = tick_ops->name;
+ sparc64_clockevent.name = tick_operations.name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
@@ -809,14 +871,21 @@ void __init time_init(void)
unsigned long long sched_clock(void)
{
- unsigned long ticks = tick_ops->get_tick();
+ unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
+ unsigned long offset = tick_operations.offset;
+
+ /* Use barrier so the compiler emits the loads first and overlaps load
+ * latency with reading tick, because reading %tick/%stick is a
+ * post-sync instruction that will flush and restart subsequent
+ * instructions after it commits.
+ */
+ barrier();
- return (ticks * timer_ticks_per_nsec_quotient)
- >> SPARC64_NSEC_PER_CYC_SHIFT;
+ return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
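
sched_clock() converts raw ticks to nanoseconds with the precomputed multiplier and right shift (the usual clocksource_hz2mult() fixed-point trick), then subtracts the offset captured in init_tick_ops() so the clock starts near zero at boot. A standalone worked example of that conversion; the shift value here is illustrative, the kernel uses SPARC64_NSEC_PER_CYC_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL
    #define SHIFT        10   /* illustrative; kernel: SPARC64_NSEC_PER_CYC_SHIFT */

    /* same idea as clocksource_hz2mult(): mult = (ns_per_sec << shift) / freq */
    static uint64_t hz2mult(uint64_t freq_hz, unsigned int shift)
    {
        return ((NSEC_PER_SEC << shift) + freq_hz / 2) / freq_hz;
    }

    int main(void)
    {
        uint64_t freq = 1000000000ULL;           /* 1 GHz tick: 1 ns per tick */
        uint64_t quotient = hz2mult(freq, SHIFT);
        uint64_t boot_tick = 5000;               /* tick value sampled at init */
        uint64_t offset = (boot_tick * quotient) >> SHIFT;
        uint64_t now_tick = 1000000 + boot_tick;

        /* ((ticks * quotient) >> shift) - offset == ns elapsed since init */
        printf("%llu\n",
               (unsigned long long)(((now_tick * quotient) >> SHIFT) - offset));
        return 0;
    }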
int read_current_timer(unsigned long *timer_val)
{
- *timer_val = tick_ops->get_tick();
+ *timer_val = get_tick();
return 0;
}
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
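
copy_tsb now takes the page size shift in %o4 (kept in %g1) instead of hard-coding PAGE_SHIFT, so a hugepage TSB is rehashed with REAL_HPAGE_SHIFT (see the tsb.c hunk later in this diff). A standalone sketch of the index computation the copy loop performs, assuming the 16-byte entry size used by the assembly; the shift and size values below are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* offset of the new TSB entry for a given virtual address */
    static uint64_t tsb_entry_offset(uint64_t vaddr, unsigned int page_shift,
                                     uint64_t new_tsb_nents)
    {
        uint64_t index = (vaddr >> page_shift) & (new_tsb_nents - 1);

        return index << 4;      /* each TSB entry is 16 bytes (tag + TTE) */
    }

    int main(void)
    {
        /* the same vaddr hashes differently for 8K pages vs 4M hugepages */
        printf("%llx\n", (unsigned long long)tsb_entry_offset(0x2004000, 13, 512));
        printf("%llx\n", (unsigned long long)tsb_entry_offset(0x2004000, 22, 512));
        return 0;
    }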
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..1c8763c9c52b 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -70,15 +70,26 @@ static int vio_device_probe(struct device *dev)
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
- int error = -ENODEV;
- if (drv->probe) {
- id = vio_match_device(drv->id_table, vdev);
- if (id)
- error = drv->probe(vdev, id);
+ if (!drv->probe)
+ return -ENODEV;
+
+ id = vio_match_device(drv->id_table, vdev);
+ if (!id)
+ return -ENODEV;
+
+ /* alloc irqs (unless the driver specified not to) */
+ if (!drv->no_irq) {
+ if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL)
+ vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->tx_ino);
+
+ if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL)
+ vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->rx_ino);
}
- return error;
+ return drv->probe(vdev, id);
}
static int vio_device_remove(struct device *dev)
@@ -86,8 +97,15 @@ static int vio_device_remove(struct device *dev)
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
- if (drv->remove)
+ if (drv->remove) {
+ /*
+ * Ideally, we would remove/deallocate tx/rx virqs
+		 * here; however, there are currently no support
+		 * routines to do so. TBD
+ */
+
return drv->remove(vdev);
+ }
return 1;
}
@@ -105,6 +123,7 @@ static ssize_t devspec_show(struct device *dev,
return sprintf(buf, "%s\n", str);
}
+static DEVICE_ATTR_RO(devspec);
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -112,6 +131,7 @@ static ssize_t type_show(struct device *dev,
struct vio_dev *vdev = to_vio_dev(dev);
return sprintf(buf, "%s\n", vdev->type);
}
+static DEVICE_ATTR_RO(type);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -120,17 +140,19 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute vio_dev_attrs[] = {
- __ATTR_RO(devspec),
- __ATTR_RO(type),
- __ATTR_RO(modalias),
- __ATTR_NULL
-};
+static struct attribute *vio_dev_attrs[] = {
+ &dev_attr_devspec.attr,
+ &dev_attr_type.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+ };
+ATTRIBUTE_GROUPS(vio_dev);
static struct bus_type vio_bus_type = {
.name = "vio",
- .dev_attrs = vio_dev_attrs,
+ .dev_groups = vio_dev_groups,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_device_probe,
@@ -181,11 +203,58 @@ static struct device_node *cdev_node;
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;
+static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
+{
+ const u64 *cfg_handle = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ return cfg_handle;
+}
+
+/**
+ * vio_vdev_node() - Find VDEV node in MD
+ * @hp: Handle to the MD
+ * @vdev: Pointer to VDEV
+ *
+ * Find the node in the current MD which matches the given vio_dev. This
+ * must be done dynamically since the node value can change if the MD
+ * is updated.
+ *
+ * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine
+ *
+ * Return: The VDEV node in MDESC
+ */
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)
+{
+ u64 node;
+
+ if (vdev == NULL)
+ return MDESC_NODE_NULL;
+
+ node = mdesc_get_node(hp, (const char *)vdev->node_name,
+ &vdev->md_node_info);
+
+ return node;
+}
+
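
Since a node value is only meaningful within the MD it was looked up in, callers are expected to re-resolve it from the saved md_node_info each time, inside an mdesc_grab()/mdesc_release() pair. A kernel-context sketch of that pattern with a hypothetical caller (example_read_id() is not part of this patch, and error handling is trimmed):

    /* sketch: fetch the "id" property of a vio_dev's node in the current MD */
    static int example_read_id(struct vio_dev *vdev, u64 *id_out)
    {
        struct mdesc_handle *hp;
        const u64 *id;
        u64 node;
        int err = -ENODEV;

        hp = mdesc_grab();                      /* pin the current MD */
        if (!hp)
            return -ENODEV;

        node = vio_vdev_node(hp, vdev);         /* match on saved md_node_info */
        if (node != MDESC_NODE_NULL) {
            id = mdesc_get_property(hp, node, "id", NULL);
            if (id) {
                *id_out = *id;
                err = 0;
            }
        }

        mdesc_release(hp);
        return err;
    }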
static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
struct vio_dev *vdev)
{
u64 a;
+ vdev->tx_ino = ~0UL;
+ vdev->rx_ino = ~0UL;
+ vdev->channel_id = ~0UL;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
const u64 *chan_id;
const u64 *irq;
@@ -195,18 +264,18 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
irq = mdesc_get_property(hp, target, "tx-ino", NULL);
if (irq)
- vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ vdev->tx_ino = *irq;
irq = mdesc_get_property(hp, target, "rx-ino", NULL);
- if (irq) {
- vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ if (irq)
vdev->rx_ino = *irq;
- }
chan_id = mdesc_get_property(hp, target, "id", NULL);
if (chan_id)
vdev->channel_id = *chan_id;
}
+
+ vdev->cdev_handle = cdev_cfg_handle;
}
int vio_set_intr(unsigned long dev_ino, int state)
@@ -219,14 +288,14 @@ int vio_set_intr(unsigned long dev_ino, int state)
EXPORT_SYMBOL(vio_set_intr);
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ const char *node_name,
struct device *parent)
{
- const char *type, *compat, *bus_id_name;
+ const char *type, *compat;
struct device_node *dp;
struct vio_dev *vdev;
int err, tlen, clen;
const u64 *id, *cfg_handle;
- u64 a;
type = mdesc_get_property(hp, mp, "device-type", &tlen);
if (!type) {
@@ -236,7 +305,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
tlen = strlen(type) + 1;
}
}
- if (tlen > VIO_MAX_TYPE_LEN) {
+ if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) {
printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
type);
return NULL;
@@ -244,31 +313,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
id = mdesc_get_property(hp, mp, "id", NULL);
- cfg_handle = NULL;
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
- u64 target;
-
- target = mdesc_arc_target(hp, a);
- cfg_handle = mdesc_get_property(hp, target,
- "cfg-handle", NULL);
- if (cfg_handle)
- break;
- }
-
- bus_id_name = type;
- if (!strcmp(type, "domain-services-port"))
- bus_id_name = "ds";
-
- /*
- * 20 char is the old driver-core name size limit, which is no more.
- * This check can probably be removed after review and possible
- * adaption of the vio users name length handling.
- */
- if (strlen(bus_id_name) >= 20 - 4) {
- printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
- bus_id_name);
- return NULL;
- }
+ cfg_handle = vio_cfg_handle(hp, mp);
compat = mdesc_get_property(hp, mp, "device-type", &clen);
if (!compat) {
@@ -293,22 +338,23 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
memset(vdev->compat, 0, sizeof(vdev->compat));
vdev->compat_len = clen;
- vdev->channel_id = ~0UL;
- vdev->tx_irq = ~0;
- vdev->rx_irq = ~0;
+ vdev->port_id = ~0UL;
+ vdev->tx_irq = 0;
+ vdev->rx_irq = 0;
vio_fill_channel_info(hp, mp, vdev);
if (!id) {
- dev_set_name(&vdev->dev, "%s", bus_id_name);
+ dev_set_name(&vdev->dev, "%s", type);
vdev->dev_no = ~(u64)0;
} else if (!cfg_handle) {
- dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
+ dev_set_name(&vdev->dev, "%s-%llu", type, *id);
vdev->dev_no = *id;
} else {
- dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
+ dev_set_name(&vdev->dev, "%s-%llu-%llu", type,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->port_id = *id;
}
vdev->dev.parent = parent;
@@ -330,7 +376,26 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
}
vdev->dp = dp;
- printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev));
+ /*
+ * node_name is NULL for the parent/channel-devices node and
+ * the parent doesn't require the MD node info.
+ */
+ if (node_name != NULL) {
+ (void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
+ node_name);
+
+ err = mdesc_get_node_info(hp, mp, node_name,
+ &vdev->md_node_info);
+ if (err) {
+ pr_err("VIO: Could not get MD node info %s, err=%d\n",
+ dev_name(&vdev->dev), err);
+ kfree(vdev);
+ return NULL;
+ }
+ }
+
+ pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
+ dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);
err = device_register(&vdev->dev);
if (err) {
@@ -346,32 +411,50 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
return vdev;
}
-static void vio_add(struct mdesc_handle *hp, u64 node)
+static void vio_add(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
+struct vio_remove_node_data {
+ struct mdesc_handle *hp;
+ u64 node;
+};
+
static int vio_md_node_match(struct device *dev, void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
+ struct vio_remove_node_data *node_data;
+ u64 node;
- if (vdev->mp == (u64) arg)
- return 1;
+ node_data = (struct vio_remove_node_data *)arg;
- return 0;
+ node = vio_vdev_node(node_data->hp, vdev);
+
+ if (node == node_data->node)
+ return 1;
+ else
+ return 0;
}
-static void vio_remove(struct mdesc_handle *hp, u64 node)
+static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
{
+ struct vio_remove_node_data node_data;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ node_data.hp = hp;
+ node_data.node = node;
+
+ dev = device_find_child(&root_vdev->dev, (void *)&node_data,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ pr_err("VIO: %s node not found in MDESC\n", node_name);
}
}
@@ -386,7 +469,8 @@ static struct mdesc_notifier_client vio_device_notifier = {
* under "openboot" that we should not mess with as aparently that is
* reserved exclusively for OBP use.
*/
-static void vio_add_ds(struct mdesc_handle *hp, u64 node)
+static void vio_add_ds(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
int found;
u64 a;
@@ -403,7 +487,7 @@ static void vio_add_ds(struct mdesc_handle *hp, u64 node)
}
if (found)
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
static struct mdesc_notifier_client vio_ds_notifier = {
@@ -470,7 +554,7 @@ static int __init vio_init(void)
cdev_cfg_handle = *cfg_handle;
- root_vdev = vio_create_one(hp, root, NULL);
+ root_vdev = vio_create_one(hp, root, NULL, NULL);
err = -ENODEV;
if (!root_vdev) {
printk(KERN_ERR "VIO: Could not create root device.\n");
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index b30b30ab3ddd..d4f13c037a40 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -223,6 +223,9 @@ static int send_rdx(struct vio_driver_state *vio)
static int send_attr(struct vio_driver_state *vio)
{
+ if (!vio->ops)
+ return -EINVAL;
+
return vio->ops->send_attr(vio);
}
@@ -283,6 +286,7 @@ static int process_ver_info(struct vio_driver_state *vio,
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
+ pkt->dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
@@ -374,6 +378,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
+ if (!vio->ops)
+ return 0;
+
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
@@ -388,6 +395,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
+
return 0;
}
@@ -647,10 +655,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
err = process_unknown(vio, pkt);
break;
}
+
if (!err &&
vio->hs_state != prev_state &&
- (vio->hs_state & VIO_HS_COMPLETE))
- vio->ops->handshake_complete(vio);
+ (vio->hs_state & VIO_HS_COMPLETE)) {
+ if (vio->ops)
+ vio->ops->handshake_complete(vio);
+ }
return err;
}
@@ -765,7 +776,11 @@ void vio_port_up(struct vio_driver_state *vio)
}
if (!err) {
- err = ldc_connect(vio->lp);
+ if (ldc_mode(vio->lp) == LDC_MODE_RAW)
+ ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
+ else
+ err = ldc_connect(vio->lp);
+
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
@@ -805,8 +820,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
return -EINVAL;
}
- if (!ops->send_attr ||
- !ops->handle_attr ||
+ if (!ops || !ops->send_attr || !ops->handle_attr ||
!ops->handshake_complete)
return -EINVAL;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 572db686f845..03b3d65d1266 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -149,6 +149,11 @@ SECTIONS
*(.sun_m7_2insn_patch)
__sun_m7_2insn_patch_end = .;
}
+ .get_tick_patch : {
+ __get_tick_patch = .;
+ *(.get_tick_patch)
+ __get_tick_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
#ifdef CONFIG_JUMP_LABEL
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 1c6a1bde5138..ce17c3094ba6 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -62,19 +62,23 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
ENDPROC(atomic_fetch_##op); \
EXPORT_SYMBOL(atomic_fetch_##op);
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
+ATOMIC_OP(add)
+ATOMIC_OP_RETURN(add)
+ATOMIC_FETCH_OP(add)
-ATOMIC_OPS(add)
-ATOMIC_OPS(sub)
+ATOMIC_OP(sub)
+ATOMIC_OP_RETURN(sub)
+ATOMIC_FETCH_OP(sub)
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+ATOMIC_OP(and)
+ATOMIC_FETCH_OP(and)
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
+ATOMIC_OP(or)
+ATOMIC_FETCH_OP(or)
+
+ATOMIC_OP(xor)
+ATOMIC_FETCH_OP(xor)
-#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -124,19 +128,23 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
ENDPROC(atomic64_fetch_##op); \
EXPORT_SYMBOL(atomic64_fetch_##op);
-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
+ATOMIC64_OP(add)
+ATOMIC64_OP_RETURN(add)
+ATOMIC64_FETCH_OP(add)
+
+ATOMIC64_OP(sub)
+ATOMIC64_OP_RETURN(sub)
+ATOMIC64_FETCH_OP(sub)
-ATOMIC64_OPS(add)
-ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_FETCH_OP(and)
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+ATOMIC64_OP(or)
+ATOMIC64_FETCH_OP(or)
-ATOMIC64_OPS(and)
-ATOMIC64_OPS(or)
-ATOMIC64_OPS(xor)
+ATOMIC64_OP(xor)
+ATOMIC64_FETCH_OP(xor)
-#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index f6732174fe6b..6cfa521f444d 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -38,6 +38,7 @@ csum_partial_fix_alignment:
.align 32
.globl csum_partial
+ .type csum_partial,#function
EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buff, %o1=len, %o2=sum */
prefetch [%o0 + 0x000], #n_reads
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 0ecbafc30fd0..b1051e77c49a 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -65,6 +65,7 @@
add %o5, %o4, %o4
.globl FUNC_NAME
+ .type FUNC_NAME,#function
EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
LOAD(prefetch, %o0 + 0x000, #n_reads)
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index f9985f129fb6..d21cf20e5c1e 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -4,9 +4,9 @@
.text
.align 32
ENTRY(__arch_hweight8)
- ba,pt %xcc, __sw_hweight8
+ sethi %hi(__sw_hweight8), %g1
+ jmpl %g1 + %lo(__sw_hweight8), %g0
nop
- nop
ENDPROC(__arch_hweight8)
EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
@@ -17,9 +17,9 @@ EXPORT_SYMBOL(__arch_hweight8)
.previous
ENTRY(__arch_hweight16)
- ba,pt %xcc, __sw_hweight16
+ sethi %hi(__sw_hweight16), %g1
+ jmpl %g1 + %lo(__sw_hweight16), %g0
nop
- nop
ENDPROC(__arch_hweight16)
EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(__arch_hweight16)
.previous
ENTRY(__arch_hweight32)
- ba,pt %xcc, __sw_hweight32
+ sethi %hi(__sw_hweight32), %g1
+ jmpl %g1 + %lo(__sw_hweight32), %g0
nop
- nop
ENDPROC(__arch_hweight32)
EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
@@ -43,9 +43,9 @@ EXPORT_SYMBOL(__arch_hweight32)
.previous
ENTRY(__arch_hweight64)
- ba,pt %xcc, __sw_hweight64
+	sethi	%hi(__sw_hweight64), %g1
+	jmpl	%g1 + %lo(__sw_hweight64), %g0
nop
- nop
ENDPROC(__arch_hweight64)
EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index daa96f4b03e6..5efee1f4be36 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -14,6 +14,8 @@
.text
.align 32
.globl __memscan_zero, __memscan_generic
+ .type __memscan_zero,#function
+ .type __memscan_generic,#function
.globl memscan
EXPORT_SYMBOL(__memscan_zero)
EXPORT_SYMBOL(__memscan_generic)
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index bb539b42b088..e23338dbfc43 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -63,6 +63,7 @@
__bzero_begin:
.globl __bzero
+ .type __bzero,#function
.globl memset
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+ .text
+ .align 4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+ mov %o1, %g1
+ srl %o3, 0, %g4
+ mulx %g4, %g1, %o1
+ srlx %g1, 0x20, %g3
+ mulx %g3, %g4, %g5
+ sllx %g5, 0x20, %o5
+ srl %g1, 0, %g4
+ sub %o1, %o5, %o5
+ srlx %o5, 0x20, %o5
+ addcc %g5, %o5, %g5
+ srlx %o3, 0x20, %o5
+ mulx %g4, %o5, %g4
+ mulx %g3, %o5, %o5
+ sethi %hi(0x80000000), %g3
+ addcc %g5, %g4, %g5
+ srlx %g5, 0x20, %g5
+ add %g3, %g3, %g3
+ movcc %xcc, %g0, %g3
+ addcc %o5, %g5, %o5
+ sllx %g4, 0x20, %g4
+ add %o1, %g4, %o1
+ add %o5, %g3, %g2
+ mulx %g1, %o2, %g1
+ add %g1, %g2, %g1
+ mulx %o0, %o3, %o0
+ retl
+ add %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
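
__multi3 is the libgcc helper for 128-bit multiplication that the compiler may emit on sparc64; it assembles the product from 32-bit partial products of the register-pair operands. A standalone sketch of the 64x64->128 building block such a routine rests on, checked against the unsigned __int128 extension (gcc/clang):

    #include <stdint.h>
    #include <stdio.h>

    /* 64x64 -> 128 multiply built from 32-bit halves */
    static void mul64x64(uint64_t u, uint64_t v, uint64_t *hi, uint64_t *lo)
    {
        uint64_t u_lo = (uint32_t)u, u_hi = u >> 32;
        uint64_t v_lo = (uint32_t)v, v_hi = v >> 32;

        uint64_t p0 = u_lo * v_lo;
        uint64_t p1 = u_lo * v_hi;
        uint64_t p2 = u_hi * v_lo;
        uint64_t p3 = u_hi * v_hi;

        /* carry out of the low 64 bits of p0 + (p1 << 32) + (p2 << 32) */
        uint64_t carry = ((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32;

        *lo = p0 + (p1 << 32) + (p2 << 32);
        *hi = p3 + (p1 >> 32) + (p2 >> 32) + carry;
    }

    int main(void)
    {
        uint64_t hi, lo;
        uint64_t u = 0xdeadbeefcafebabeULL, v = 0x123456789abcdef0ULL;

        mul64x64(u, v, &hi, &lo);

        unsigned __int128 ref = (unsigned __int128)u * v;
        printf("match: %d\n",
               hi == (uint64_t)(ref >> 64) && lo == (uint64_t)ref);
        return 0;
    }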
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index db214e9931d9..2422511dc8c5 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -13,11 +13,11 @@ void sort_extable(struct exception_table_entry *start,
/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
-search_extable(const struct exception_table_entry *start,
- const struct exception_table_entry *last,
+search_extable(const struct exception_table_entry *base,
+ const size_t num,
unsigned long value)
{
- const struct exception_table_entry *walk;
+ int i;
/* Single insn entries are encoded as:
* word 1: insn address
@@ -37,30 +37,30 @@ search_extable(const struct exception_table_entry *start,
*/
/* 1. Try to find an exact match. */
- for (walk = start; walk <= last; walk++) {
- if (walk->fixup == 0) {
+ for (i = 0; i < num; i++) {
+ if (base[i].fixup == 0) {
/* A range entry, skip both parts. */
- walk++;
+ i++;
continue;
}
/* A deleted entry; see trim_init_extable */
- if (walk->fixup == -1)
+ if (base[i].fixup == -1)
continue;
- if (walk->insn == value)
- return walk;
+ if (base[i].insn == value)
+ return &base[i];
}
/* 2. Try to find a range match. */
- for (walk = start; walk <= (last - 1); walk++) {
- if (walk->fixup)
+ for (i = 0; i < (num - 1); i++) {
+ if (base[i].fixup)
continue;
- if (walk[0].insn <= value && walk[1].insn > value)
- return walk;
+ if (base[i].insn <= value && base[i + 1].insn > value)
+ return &base[i];
- walk++;
+ i++;
}
return NULL;
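
search_extable() now indexes the table by count rather than walking start/last pointers; a single-insn entry carries its fixup, while a range occupies two consecutive entries whose first slot has fixup == 0. A standalone sketch of the exact-then-range lookup over that encoding (entry values illustrative, deleted-entry handling omitted):

    #include <stddef.h>
    #include <stdio.h>

    struct entry { unsigned long insn; unsigned long fixup; };

    /* same two-pass shape as the sparc search_extable(): exact, then range */
    static const struct entry *search(const struct entry *base, size_t num,
                                      unsigned long value)
    {
        size_t i;

        for (i = 0; i < num; i++) {
            if (base[i].fixup == 0) {   /* range entry: skip both slots */
                i++;
                continue;
            }
            if (base[i].insn == value)
                return &base[i];
        }
        for (i = 0; i + 1 < num; i++) {
            if (base[i].fixup)
                continue;
            if (base[i].insn <= value && base[i + 1].insn > value)
                return &base[i];
            i++;
        }
        return NULL;
    }

    int main(void)
    {
        /* one single-insn entry, then a range entry occupying two slots */
        struct entry table[] = {
            { 0x1000, 0x9000 },
            { 0x2000, 0 }, { 0x2100, 0x9100 },
        };
        unsigned long probes[] = { 0x1000, 0x2080, 0x3000 };

        for (int i = 0; i < 3; i++) {
            const struct entry *e = search(table, 3, probes[i]);

            printf("%#lx -> %ld\n", probes[i], e ? (long)(e - table) : -1L);
        }
        return 0;   /* prints 0, 1, -1 */
    }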
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index cd0e32bbcb1d..f80cfc64c55b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -78,8 +78,8 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
return 0;
refs = 0;
- head = pmd_page(pmd);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ head = compound_head(page);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..28ee8d8ffa07 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -277,7 +277,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c6afe98de4d9..3bd0d513bddb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -290,7 +290,7 @@ void __init mem_init(void)
/* Saves us work later. */
- memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
}
if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
- pr_warn("hugepagesz=%llu not supported by MMU.\n",
+ hugetlb_bad_size();
+ pr_err("hugepagesz=%llu not supported by MMU.\n",
hugepage_size);
goto out;
}
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+ unsigned long new_ver, new_ctx, old_ctx;
+ struct mm_struct *mm;
+ int cpu;
+
+ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+ /* Reserve kernel context */
+ set_bit(0, mmu_context_bmap);
+
+ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if (unlikely(new_ver == 0))
+ new_ver = CTX_FIRST_VERSION;
+ tlb_context_cache = new_ver;
+
+ /*
+	 * Make sure that any new mm added into per_cpu_secondary_mm will
+	 * go through the get_new_mmu_context() path.
+ */
+ mb();
+
+ /*
+	 * Update the version to the current one on those CPUs that had a
+	 * valid secondary context
+ */
+ for_each_online_cpu(cpu) {
+ /*
+ * If a new mm is stored after we took this mm from the array,
+ * it will go into get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+ mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+ if (unlikely(!mm || mm == &init_mm))
+ continue;
+
+ old_ctx = mm->context.sparc64_ctx_val;
+ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+ mm->context.sparc64_ctx_val = new_ctx;
+ }
+ }
+}
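
A context value packs a version in the upper bits and a context number in the lower CTX_NR_BITS; on wrap, live secondary contexts keep their number but are stamped with the new version, while anything still carrying the old version fails CTX_VALID() and is re-allocated lazily. A standalone sketch of that re-stamping, assuming an illustrative 13-bit split (the real masks are defined in asm/mmu_64.h):

    #include <stdio.h>

    /* illustrative context-number width; see asm/mmu_64.h for the real one */
    #define CTX_NR_BITS       13
    #define CTX_NR_MASK       ((1UL << CTX_NR_BITS) - 1)
    #define CTX_VERSION_MASK  (~CTX_NR_MASK)
    #define CTX_FIRST_VERSION (1UL << CTX_NR_BITS)

    /* keep the context number, stamp in the new version */
    static unsigned long restamp(unsigned long old_ctx, unsigned long new_ver)
    {
        return (old_ctx & ~CTX_VERSION_MASK) | new_ver;
    }

    int main(void)
    {
        unsigned long cache = CTX_FIRST_VERSION;             /* current version */
        unsigned long ctx = (cache & CTX_VERSION_MASK) | 42; /* ctx number 42   */
        unsigned long new_ver = (cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;

        printf("restamped:   %#lx -> %#lx\n", ctx, restamp(ctx, new_ver));
        printf("left behind: version still current? %d\n",
               (ctx & CTX_VERSION_MASK) == new_ver);   /* 0: re-allocated lazily */
        return 0;
    }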
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- int new_version;
spin_lock(&ctx_alloc_lock);
+retry:
+ /* wrap might have happened, test again if our context became valid */
+ if (unlikely(CTX_VALID(mm->context)))
+ goto out;
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
- new_version = 0;
if (new_ctx >= (1 << CTX_NR_BITS)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
- int i;
- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
- CTX_FIRST_VERSION;
- if (new_ctx == 1)
- new_ctx = CTX_FIRST_VERSION;
-
- /* Don't call memset, for 16 entries that's just
- * plain silly...
- */
- mmu_context_bmap[0] = 3;
- mmu_context_bmap[1] = 0;
- mmu_context_bmap[2] = 0;
- mmu_context_bmap[3] = 0;
- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
- mmu_context_bmap[i + 0] = 0;
- mmu_context_bmap[i + 1] = 0;
- mmu_context_bmap[i + 2] = 0;
- mmu_context_bmap[i + 3] = 0;
- }
- new_version = 1;
- goto out;
+ mmu_context_wrap();
+ goto retry;
}
}
+ if (mm->context.sparc64_ctx_val)
+ cpumask_clear(mm_cpumask(mm));
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
spin_unlock(&ctx_alloc_lock);
-
- if (unlikely(new_version))
- smp_new_mmu_context_version();
}
static int numa_enabled = 1;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
extern void copy_tsb(unsigned long old_tsb_base,
unsigned long old_tsb_size,
unsigned long new_tsb_base,
- unsigned long new_tsb_size);
+ unsigned long new_tsb_size,
+ unsigned long page_size_shift);
unsigned long old_tsb_base = (unsigned long) old_tsb;
unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -504,7 +505,9 @@ retry_tsb_alloc:
old_tsb_base = __pa(old_tsb_base);
new_tsb_base = __pa(new_tsb_base);
}
- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+ tsb_index == MM_TSB_BASE ?
+ PAGE_SHIFT : REAL_HPAGE_SHIFT);
}
mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
- .globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
- retry
-
#ifdef CONFIG_KGDB
.globl xcall_kgdb_capture
xcall_kgdb_capture:
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 21de77419f48..8799ae9a8788 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -802,8 +802,13 @@ static void build_prologue(struct jit_ctx *ctx)
{
s32 stack_needed = BASE_STACKFRAME;
- if (ctx->saw_frame_pointer || ctx->saw_tail_call)
- stack_needed += MAX_BPF_STACK;
+ if (ctx->saw_frame_pointer || ctx->saw_tail_call) {
+ struct bpf_prog *prog = ctx->prog;
+ u32 stack_depth;
+
+ stack_depth = prog->aux->stack_depth;
+ stack_needed += round_up(stack_depth, 16);
+ }
if (ctx->saw_tail_call)
stack_needed += 8;
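
The JIT prologue now sizes the frame from the program's verifier-computed stack_depth, rounded up to 16 bytes, instead of always reserving MAX_BPF_STACK. A short worked example of that rounding, using the usual power-of-two round_up identity:

    #include <stdio.h>

    /* power-of-two round_up, as used for the 16-byte stack alignment */
    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int depths[] = { 0, 1, 24, 512 };

        for (int i = 0; i < 4; i++)
            printf("stack_depth %u -> %u bytes reserved\n",
                   depths[i], ROUND_UP(depths[i], 16));
        return 0;   /* 0, 16, 32, 512 */
    }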
@@ -1217,7 +1222,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
}
/* tail call */
- case BPF_JMP | BPF_CALL |BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
emit_tail_call(ctx);
break;
@@ -1555,6 +1560,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
+ prog->jited_len = image_size;
out_off:
kfree(ctx.offset);