path: root/arch/sparc/kernel
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r-- arch/sparc/kernel/.gitignore | 1
-rw-r--r-- arch/sparc/kernel/Makefile | 31
-rw-r--r-- arch/sparc/kernel/adi_64.c | 396
-rw-r--r-- arch/sparc/kernel/apc.c | 11
-rw-r--r-- arch/sparc/kernel/asm-offsets.c | 9
-rw-r--r-- arch/sparc/kernel/audit.c | 33
-rw-r--r-- arch/sparc/kernel/auxio_32.c | 11
-rw-r--r-- arch/sparc/kernel/auxio_64.c | 17
-rw-r--r-- arch/sparc/kernel/btext.c | 370
-rw-r--r-- arch/sparc/kernel/central.c | 9
-rw-r--r-- arch/sparc/kernel/cherrs.S | 15
-rw-r--r-- arch/sparc/kernel/chmc.c | 18
-rw-r--r-- arch/sparc/kernel/compat_audit.c | 27
-rw-r--r-- arch/sparc/kernel/cpu.c | 28
-rw-r--r-- arch/sparc/kernel/cpumap.c | 12
-rw-r--r-- arch/sparc/kernel/cpumap.h | 5
-rw-r--r-- arch/sparc/kernel/devices.c | 13
-rw-r--r-- arch/sparc/kernel/dma.c | 12
-rw-r--r-- arch/sparc/kernel/ds.c | 71
-rw-r--r-- arch/sparc/kernel/dtlb_miss.S | 1
-rw-r--r-- arch/sparc/kernel/dtlb_prot.S | 11
-rw-r--r-- arch/sparc/kernel/ebus.c | 2
-rw-r--r-- arch/sparc/kernel/entry.S | 89
-rw-r--r-- arch/sparc/kernel/entry.h | 259
-rw-r--r-- arch/sparc/kernel/etrap_32.S | 1
-rw-r--r-- arch/sparc/kernel/etrap_64.S | 54
-rw-r--r-- arch/sparc/kernel/fpu_traps.S | 12
-rw-r--r-- arch/sparc/kernel/ftrace.c | 22
-rw-r--r-- arch/sparc/kernel/getsetcc.S | 1
-rw-r--r-- arch/sparc/kernel/head_32.S | 34
-rw-r--r-- arch/sparc/kernel/head_64.S | 173
-rw-r--r-- arch/sparc/kernel/helpers.S | 3
-rw-r--r-- arch/sparc/kernel/hvapi.c | 7
-rw-r--r-- arch/sparc/kernel/hvcalls.S | 109
-rw-r--r-- arch/sparc/kernel/hvtramp.S | 4
-rw-r--r-- arch/sparc/kernel/idprom.c | 8
-rw-r--r-- arch/sparc/kernel/iommu-common.c | 260
-rw-r--r-- arch/sparc/kernel/iommu.c | 275
-rw-r--r-- arch/sparc/kernel/iommu_common.h | 10
-rw-r--r-- arch/sparc/kernel/ioport.c | 476
-rw-r--r-- arch/sparc/kernel/irq.h | 12
-rw-r--r-- arch/sparc/kernel/irq_32.c | 24
-rw-r--r-- arch/sparc/kernel/irq_64.c | 607
-rw-r--r-- arch/sparc/kernel/itlb_miss.S | 1
-rw-r--r-- arch/sparc/kernel/ivec.S | 1
-rw-r--r-- arch/sparc/kernel/jump_label.c | 32
-rw-r--r-- arch/sparc/kernel/kernel.h | 140
-rw-r--r-- arch/sparc/kernel/kgdb_32.c | 20
-rw-r--r-- arch/sparc/kernel/kgdb_64.c | 17
-rw-r--r-- arch/sparc/kernel/kprobes.c | 166
-rw-r--r-- arch/sparc/kernel/kstack.h | 1
-rw-r--r-- arch/sparc/kernel/ktlb.S | 123
-rw-r--r-- arch/sparc/kernel/ldc.c | 378
-rw-r--r-- arch/sparc/kernel/led.c | 58
-rw-r--r-- arch/sparc/kernel/leon_kernel.c | 129
-rw-r--r-- arch/sparc/kernel/leon_pci.c | 170
-rw-r--r-- arch/sparc/kernel/leon_pci_grpci1.c | 25
-rw-r--r-- arch/sparc/kernel/leon_pci_grpci2.c | 36
-rw-r--r-- arch/sparc/kernel/leon_pmc.c | 17
-rw-r--r-- arch/sparc/kernel/leon_smp.c | 52
-rw-r--r-- arch/sparc/kernel/mdesc.c | 625
-rw-r--r-- arch/sparc/kernel/misctrap.S | 12
-rw-r--r-- arch/sparc/kernel/module.c | 37
-rw-r--r-- arch/sparc/kernel/nmi.c | 95
-rw-r--r-- arch/sparc/kernel/of_device_32.c | 33
-rw-r--r-- arch/sparc/kernel/of_device_64.c | 80
-rw-r--r-- arch/sparc/kernel/of_device_common.c | 15
-rw-r--r-- arch/sparc/kernel/of_device_common.h | 1
-rw-r--r-- arch/sparc/kernel/pci.c | 597
-rw-r--r-- arch/sparc/kernel/pci_common.c | 99
-rw-r--r-- arch/sparc/kernel/pci_fire.c | 8
-rw-r--r-- arch/sparc/kernel/pci_impl.h | 41
-rw-r--r-- arch/sparc/kernel/pci_msi.c | 32
-rw-r--r-- arch/sparc/kernel/pci_psycho.c | 6
-rw-r--r-- arch/sparc/kernel/pci_sabre.c | 13
-rw-r--r-- arch/sparc/kernel/pci_schizo.c | 30
-rw-r--r-- arch/sparc/kernel/pci_sun4v.c | 687
-rw-r--r-- arch/sparc/kernel/pci_sun4v.h | 178
-rw-r--r-- arch/sparc/kernel/pci_sun4v_asm.S | 69
-rw-r--r-- arch/sparc/kernel/pcic.c | 202
-rw-r--r-- arch/sparc/kernel/pcr.c | 83
-rw-r--r-- arch/sparc/kernel/perf_event.c | 243
-rw-r--r-- arch/sparc/kernel/pmc.c | 8
-rw-r--r-- arch/sparc/kernel/power.c | 18
-rw-r--r-- arch/sparc/kernel/process.c | 110
-rw-r--r-- arch/sparc/kernel/process_32.c | 175
-rw-r--r-- arch/sparc/kernel/process_64.c | 265
-rw-r--r-- arch/sparc/kernel/prom.h | 3
-rw-r--r-- arch/sparc/kernel/prom_32.c | 85
-rw-r--r-- arch/sparc/kernel/prom_64.c | 161
-rw-r--r-- arch/sparc/kernel/prom_common.c | 13
-rw-r--r-- arch/sparc/kernel/prom_irqtrans.c | 24
-rw-r--r-- arch/sparc/kernel/psycho_common.c | 7
-rw-r--r-- arch/sparc/kernel/psycho_common.h | 23
-rw-r--r-- arch/sparc/kernel/ptrace_32.c | 456
-rw-r--r-- arch/sparc/kernel/ptrace_64.c | 760
-rw-r--r-- arch/sparc/kernel/reboot.c | 4
-rw-r--r-- arch/sparc/kernel/rtrap_32.S | 3
-rw-r--r-- arch/sparc/kernel/rtrap_64.S | 131
-rw-r--r-- arch/sparc/kernel/sbus.c | 11
-rw-r--r-- arch/sparc/kernel/setup.c | 46
-rw-r--r-- arch/sparc/kernel/setup_32.c | 63
-rw-r--r-- arch/sparc/kernel/setup_64.c | 190
-rw-r--r-- arch/sparc/kernel/signal32.c | 254
-rw-r--r-- arch/sparc/kernel/signal_32.c | 86
-rw-r--r-- arch/sparc/kernel/signal_64.c | 135
-rw-r--r-- arch/sparc/kernel/sigutil.h | 1
-rw-r--r-- arch/sparc/kernel/sigutil_32.c | 12
-rw-r--r-- arch/sparc/kernel/sigutil_64.c | 11
-rw-r--r-- arch/sparc/kernel/smp_32.c | 40
-rw-r--r-- arch/sparc/kernel/smp_64.c | 622
-rw-r--r-- arch/sparc/kernel/sparc_ksyms.c | 12
-rw-r--r-- arch/sparc/kernel/sparc_ksyms_32.c | 32
-rw-r--r-- arch/sparc/kernel/sparc_ksyms_64.c | 53
-rw-r--r-- arch/sparc/kernel/spiterrs.S | 18
-rw-r--r-- arch/sparc/kernel/sstate.c | 8
-rw-r--r-- arch/sparc/kernel/stacktrace.c | 10
-rw-r--r-- arch/sparc/kernel/starfire.c | 6
-rw-r--r-- arch/sparc/kernel/sun4d_irq.c | 36
-rw-r--r-- arch/sparc/kernel/sun4d_smp.c | 25
-rw-r--r-- arch/sparc/kernel/sun4m_irq.c | 13
-rw-r--r-- arch/sparc/kernel/sun4m_smp.c | 23
-rw-r--r-- arch/sparc/kernel/sun4v_ivec.S | 16
-rw-r--r-- arch/sparc/kernel/sun4v_mcd.S | 17
-rw-r--r-- arch/sparc/kernel/sun4v_tlb_miss.S | 39
-rw-r--r-- arch/sparc/kernel/sys32.S | 259
-rw-r--r-- arch/sparc/kernel/sys_sparc32.c | 121
-rw-r--r-- arch/sparc/kernel/sys_sparc_32.c | 98
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 238
-rw-r--r-- arch/sparc/kernel/syscalls.S | 103
-rw-r--r-- arch/sparc/kernel/syscalls/Makefile | 33
-rw-r--r-- arch/sparc/kernel/syscalls/syscall.tbl | 518
-rw-r--r-- arch/sparc/kernel/sysfs.c | 95
-rw-r--r-- arch/sparc/kernel/systbls.h | 124
-rw-r--r-- arch/sparc/kernel/systbls_32.S | 78
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 152
-rw-r--r-- arch/sparc/kernel/tadpole.c | 126
-rw-r--r-- arch/sparc/kernel/termios.c | 115
-rw-r--r-- arch/sparc/kernel/time_32.c | 110
-rw-r--r-- arch/sparc/kernel/time_64.c | 288
-rw-r--r-- arch/sparc/kernel/trampoline_32.S | 5
-rw-r--r-- arch/sparc/kernel/trampoline_64.S | 20
-rw-r--r-- arch/sparc/kernel/traps_32.c | 123
-rw-r--r-- arch/sparc/kernel/traps_64.c | 527
-rw-r--r-- arch/sparc/kernel/tsb.S | 65
-rw-r--r-- arch/sparc/kernel/ttable_32.S | 1
-rw-r--r-- arch/sparc/kernel/ttable_64.S | 11
-rw-r--r-- arch/sparc/kernel/una_asm_32.S | 1
-rw-r--r-- arch/sparc/kernel/una_asm_64.S | 1
-rw-r--r-- arch/sparc/kernel/unaligned_32.c | 127
-rw-r--r-- arch/sparc/kernel/unaligned_64.c | 61
-rw-r--r-- arch/sparc/kernel/uprobes.c | 320
-rw-r--r-- arch/sparc/kernel/urtt_fill.S | 105
-rw-r--r-- arch/sparc/kernel/utrap.S | 4
-rw-r--r-- arch/sparc/kernel/vdso.c | 69
-rw-r--r-- arch/sparc/kernel/vio.c | 258
-rw-r--r-- arch/sparc/kernel/viohs.c | 78
-rw-r--r-- arch/sparc/kernel/visemul.c | 5
-rw-r--r-- arch/sparc/kernel/vmlinux.lds.S | 54
-rw-r--r-- arch/sparc/kernel/windows.c | 12
-rw-r--r-- arch/sparc/kernel/winfixup.S | 6
-rw-r--r-- arch/sparc/kernel/wof.S | 1
-rw-r--r-- arch/sparc/kernel/wuf.S | 1
163 files changed, 9230 insertions, 7203 deletions
diff --git a/arch/sparc/kernel/.gitignore b/arch/sparc/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/sparc/kernel/.gitignore
+++ b/arch/sparc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index d432fb20358e..22170d4f8e06 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,16 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
#
# Makefile for the linux kernel.
#
-asflags-y := -ansi
-ccflags-y := -Werror
-
-extra-y := head_$(BITS).o
-
# Undefine sparc when processing vmlinux.lds - it is used
# And teach CPP we are doing $(BITS) builds (for this case)
CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
-extra-y += vmlinux.lds
+always-$(KBUILD_BUILTIN) += vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -20,6 +17,8 @@ CFLAGS_REMOVE_perf_event.o := -pg
CFLAGS_REMOVE_pcr.o := -pg
endif
+obj-y := head_$(BITS).o
+obj-$(CONFIG_SPARC64) += urtt_fill.o
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
@@ -30,9 +29,11 @@ obj-y += irq_$(BITS).o
obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4d_irq.o
obj-y += process_$(BITS).o
+obj-y += process.o
obj-y += signal_$(BITS).o
obj-y += sigutil_$(BITS).o
obj-$(CONFIG_SPARC32) += ioport.o
+obj-y += setup.o
obj-y += setup_$(BITS).o
obj-y += idprom.o
obj-y += sys_sparc_$(BITS).o
@@ -40,8 +41,8 @@ obj-$(CONFIG_SPARC32) += systbls_32.o
obj-y += time_$(BITS).o
obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
+obj-$(CONFIG_SPARC64) += vdso.o
obj-$(CONFIG_SPARC32) += devices.o
-obj-$(CONFIG_SPARC32) += tadpole.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
@@ -56,7 +57,7 @@ obj-$(CONFIG_SPARC32) += leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
-obj-$(CONFIG_SPARC64) += iommu.o
+obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o
obj-$(CONFIG_SPARC64) += central.o
obj-$(CONFIG_SPARC64) += starfire.o
obj-$(CONFIG_SPARC64) += power.o
@@ -66,12 +67,11 @@ obj-$(CONFIG_SPARC64) += visemul.o
obj-$(CONFIG_SPARC64) += hvapi.o
obj-$(CONFIG_SPARC64) += sstate.o
obj-$(CONFIG_SPARC64) += mdesc.o
+obj-$(CONFIG_SPARC64) += adi_64.o
obj-$(CONFIG_SPARC64) += pcr.o
obj-$(CONFIG_SPARC64) += nmi.o
obj-$(CONFIG_SPARC64_SMP) += cpumap.o
-obj-y += dma.o
-
obj-$(CONFIG_PCIC_PCI) += pcic.o
obj-$(CONFIG_LEON_PCI) += leon_pci.o
obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o
@@ -84,12 +84,13 @@ obj-$(CONFIG_SPARC64_SMP) += hvtramp.o
obj-y += auxio_$(BITS).o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
+obj-y += termios.o
+
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o
+obj-$(CONFIG_MODULES) += sparc_ksyms.o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
-
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
@@ -99,7 +100,8 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
-obj-$(CONFIG_PCI_MSI) += pci_msi.o
+obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
+
obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
@@ -115,4 +117,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
-obj-$(CONFIG_SPARC64) += jump_label.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/sparc/kernel/adi_64.c b/arch/sparc/kernel/adi_64.c
new file mode 100644
index 000000000000..18036a43cf56
--- /dev/null
+++ b/arch/sparc/kernel/adi_64.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* adi_64.c: support for ADI (Application Data Integrity) feature on
+ * sparc m7 and newer processors. This feature is also known as
+ * SSM (Silicon Secured Memory).
+ *
+ * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
+ * Author: Khalid Aziz (khalid.aziz@oracle.com)
+ */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+#include <asm/mdesc.h>
+#include <asm/adi_64.h>
+#include <asm/mmu_64.h>
+#include <asm/pgtable_64.h>
+
+/* Each page of storage for ADI tags can accommodate tags for 128
+ * pages. When ADI enabled pages are being swapped out, it would be
+ * prudent to allocate at least enough tag storage space to accommodate
+ * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
+ * store tags for four SWAPFILE_CLUSTER pages to reduce need for
+ * further allocations for same vma.
+ */
+#define TAG_STORAGE_PAGES 8
+
+struct adi_config adi_state;
+EXPORT_SYMBOL(adi_state);
+
+/* mdesc_adi_init() : Parse machine description provided by the
+ * hypervisor to detect ADI capabilities
+ *
+ * Hypervisor reports ADI capabilities of platform in "hwcap-list" property
+ * for "cpu" node. If the platform supports ADI, "hwcap-list" property
+ * contains the keyword "adp". If the platform supports ADI, "platform"
+ * node will contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
+ * to describe the ADI capabilities.
+ */
+void __init mdesc_adi_init(void)
+{
+ struct mdesc_handle *hp = mdesc_grab();
+ const char *prop;
+ u64 pn, *val;
+ int len;
+
+ if (!hp)
+ goto adi_not_found;
+
+ pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
+ if (pn == MDESC_NODE_NULL)
+ goto adi_not_found;
+
+ prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
+ if (!prop)
+ goto adi_not_found;
+
+ /*
+ * Look for "adp" keyword in hwcap-list which would indicate
+ * ADI support
+ */
+ adi_state.enabled = false;
+ while (len) {
+ int plen;
+
+ if (!strcmp(prop, "adp")) {
+ adi_state.enabled = true;
+ break;
+ }
+
+ plen = strlen(prop) + 1;
+ prop += plen;
+ len -= plen;
+ }
+
+ if (!adi_state.enabled)
+ goto adi_not_found;
+
+ /* Find the ADI properties in "platform" node. If all ADI
+ * properties are not found, ADI support is incomplete and
+ * do not enable ADI in the kernel.
+ */
+ pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
+ if (pn == MDESC_NODE_NULL)
+ goto adi_not_found;
+
+ val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
+ if (!val)
+ goto adi_not_found;
+ adi_state.caps.blksz = *val;
+
+ val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
+ if (!val)
+ goto adi_not_found;
+ adi_state.caps.nbits = *val;
+
+ val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
+ if (!val)
+ goto adi_not_found;
+ adi_state.caps.ue_on_adi = *val;
+
+ /* Some of the code to support swapping ADI tags is written
+ * assumption that two ADI tags can fit inside one byte. If
+ * this assumption is broken by a future architecture change,
+ * that code will have to be revisited. If that were to happen,
+ * disable ADI support so we do not get unpredictable results
+ * with programs trying to use ADI and their pages getting
+ * swapped out
+ */
+ if (adi_state.caps.nbits > 4) {
+ pr_warn("WARNING: ADI tag size >4 on this platform. Disabling AADI support\n");
+ adi_state.enabled = false;
+ }
+
+ mdesc_release(hp);
+ return;
+
+adi_not_found:
+ adi_state.enabled = false;
+ adi_state.caps.blksz = 0;
+ adi_state.caps.nbits = 0;
+ if (hp)
+ mdesc_release(hp);
+}
+
+static tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ tag_storage_desc_t *tag_desc = NULL;
+ unsigned long i, max_desc, flags;
+
+ /* Check if this vma already has tag storage descriptor
+ * allocated for it.
+ */
+ max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+ if (mm->context.tag_store) {
+ tag_desc = mm->context.tag_store;
+ spin_lock_irqsave(&mm->context.tag_lock, flags);
+ for (i = 0; i < max_desc; i++) {
+ if ((addr >= tag_desc->start) &&
+ ((addr + PAGE_SIZE - 1) <= tag_desc->end))
+ break;
+ tag_desc++;
+ }
+ spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+
+ /* If no matching entries were found, this must be a
+ * freshly allocated page
+ */
+ if (i >= max_desc)
+ tag_desc = NULL;
+ }
+
+ return tag_desc;
+}
+
+static tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ unsigned char *tags;
+ unsigned long i, size, max_desc, flags;
+ tag_storage_desc_t *tag_desc, *open_desc;
+ unsigned long end_addr, hole_start, hole_end;
+
+ max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+ open_desc = NULL;
+ hole_start = 0;
+ hole_end = ULONG_MAX;
+ end_addr = addr + PAGE_SIZE - 1;
+
+ /* Check if this vma already has tag storage descriptor
+ * allocated for it.
+ */
+ spin_lock_irqsave(&mm->context.tag_lock, flags);
+ if (mm->context.tag_store) {
+ tag_desc = mm->context.tag_store;
+
+ /* Look for a matching entry for this address. While doing
+ * that, look for the first open slot as well and find
+ * the hole in already allocated range where this request
+ * will fit in.
+ */
+ for (i = 0; i < max_desc; i++) {
+ if (tag_desc->tag_users == 0) {
+ if (open_desc == NULL)
+ open_desc = tag_desc;
+ } else {
+ if ((addr >= tag_desc->start) &&
+ (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
+ tag_desc->tag_users++;
+ goto out;
+ }
+ }
+ if ((tag_desc->start > end_addr) &&
+ (tag_desc->start < hole_end))
+ hole_end = tag_desc->start;
+ if ((tag_desc->end < addr) &&
+ (tag_desc->end > hole_start))
+ hole_start = tag_desc->end;
+ tag_desc++;
+ }
+
+ } else {
+ size = sizeof(tag_storage_desc_t)*max_desc;
+ mm->context.tag_store = kzalloc(size, GFP_NOWAIT);
+ if (mm->context.tag_store == NULL) {
+ tag_desc = NULL;
+ goto out;
+ }
+ tag_desc = mm->context.tag_store;
+ for (i = 0; i < max_desc; i++, tag_desc++)
+ tag_desc->tag_users = 0;
+ open_desc = mm->context.tag_store;
+ i = 0;
+ }
+
+ /* Check if we ran out of tag storage descriptors */
+ if (open_desc == NULL) {
+ tag_desc = NULL;
+ goto out;
+ }
+
+ /* Mark this tag descriptor slot in use and then initialize it */
+ tag_desc = open_desc;
+ tag_desc->tag_users = 1;
+
+ /* Tag storage has not been allocated for this vma and space
+ * is available in tag storage descriptor. Since this page is
+ * being swapped out, there is high probability subsequent pages
+ * in the VMA will be swapped out as well. Allocate pages to
+ * store tags for as many pages in this vma as possible but not
+ * more than TAG_STORAGE_PAGES. Each byte in tag space holds
+ * two ADI tags since each ADI tag is 4 bits. Each ADI tag
+ * covers adi_blksize() worth of addresses. Check if the hole is
+ * big enough to accommodate full address range for using
+ * TAG_STORAGE_PAGES number of tag pages.
+ */
+ size = TAG_STORAGE_PAGES * PAGE_SIZE;
+ end_addr = addr + (size*2*adi_blksize()) - 1;
+ /* Check for overflow. If overflow occurs, allocate only one page */
+ if (end_addr < addr) {
+ size = PAGE_SIZE;
+ end_addr = addr + (size*2*adi_blksize()) - 1;
+ /* If overflow happens with the minimum tag storage
+ * allocation as well, adjust ending address for this
+ * tag storage.
+ */
+ if (end_addr < addr)
+ end_addr = ULONG_MAX;
+ }
+ if (hole_end < end_addr) {
+ /* Available hole is too small on the upper end of
+ * address. Can we expand the range towards the lower
+ * address and maximize use of this slot?
+ */
+ unsigned long tmp_addr;
+
+ end_addr = hole_end - 1;
+ tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
+ /* Check for underflow. If underflow occurs, allocate
+ * only one page for storing ADI tags
+ */
+ if (tmp_addr > addr) {
+ size = PAGE_SIZE;
+ tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
+ /* If underflow happens with the minimum tag storage
+ * allocation as well, adjust starting address for
+ * this tag storage.
+ */
+ if (tmp_addr > addr)
+ tmp_addr = 0;
+ }
+ if (tmp_addr < hole_start) {
+ /* Available hole is restricted on lower address
+ * end as well
+ */
+ tmp_addr = hole_start + 1;
+ }
+ addr = tmp_addr;
+ size = (end_addr + 1 - addr)/(2*adi_blksize());
+ size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
+ size = size * PAGE_SIZE;
+ }
+ tags = kzalloc(size, GFP_NOWAIT);
+ if (tags == NULL) {
+ tag_desc->tag_users = 0;
+ tag_desc = NULL;
+ goto out;
+ }
+ tag_desc->start = addr;
+ tag_desc->tags = tags;
+ tag_desc->end = end_addr;
+
+out:
+ spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+ return tag_desc;
+}
+
+static void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
+{
+ unsigned long flags;
+ unsigned char *tags = NULL;
+
+ spin_lock_irqsave(&mm->context.tag_lock, flags);
+ tag_desc->tag_users--;
+ if (tag_desc->tag_users == 0) {
+ tag_desc->start = tag_desc->end = 0;
+ /* Do not free up the tag storage space allocated
+ * by the first descriptor. This is persistent
+ * emergency tag storage space for the task.
+ */
+ if (tag_desc != mm->context.tag_store) {
+ tags = tag_desc->tags;
+ tag_desc->tags = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+ kfree(tags);
+}
+
+#define tag_start(addr, tag_desc) \
+ ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
+
+/* Retrieve any saved ADI tags for the page being swapped back in and
+ * restore these tags to the newly allocated physical page.
+ */
+void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte)
+{
+ unsigned char *tag;
+ tag_storage_desc_t *tag_desc;
+ unsigned long paddr, tmp, version1, version2;
+
+ /* Check if the swapped out page has an ADI version
+ * saved. If yes, restore version tag to the newly
+ * allocated page.
+ */
+ tag_desc = find_tag_store(mm, vma, addr);
+ if (tag_desc == NULL)
+ return;
+
+ tag = tag_start(addr, tag_desc);
+ paddr = pte_val(pte) & _PAGE_PADDR_4V;
+ for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+ version1 = (*tag) >> 4;
+ version2 = (*tag) & 0x0f;
+ *tag++ = 0;
+ asm volatile("stxa %0, [%1] %2\n\t"
+ :
+ : "r" (version1), "r" (tmp),
+ "i" (ASI_MCD_REAL));
+ tmp += adi_blksize();
+ asm volatile("stxa %0, [%1] %2\n\t"
+ :
+ : "r" (version2), "r" (tmp),
+ "i" (ASI_MCD_REAL));
+ }
+ asm volatile("membar #Sync\n\t");
+
+ /* Check and mark this tag space for release later if
+ * the swapped in page was the last user of tag space
+ */
+ del_tag_store(tag_desc, mm);
+}
+
+/* A page is about to be swapped out. Save any ADI tags associated with
+ * this physical page so they can be restored later when the page is swapped
+ * back in.
+ */
+int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t oldpte)
+{
+ unsigned char *tag;
+ tag_storage_desc_t *tag_desc;
+ unsigned long version1, version2, paddr, tmp;
+
+ tag_desc = alloc_tag_store(mm, vma, addr);
+ if (tag_desc == NULL)
+ return -1;
+
+ tag = tag_start(addr, tag_desc);
+ paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
+ for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+ asm volatile("ldxa [%1] %2, %0\n\t"
+ : "=r" (version1)
+ : "r" (tmp), "i" (ASI_MCD_REAL));
+ tmp += adi_blksize();
+ asm volatile("ldxa [%1] %2, %0\n\t"
+ : "=r" (version2)
+ : "r" (tmp), "i" (ASI_MCD_REAL));
+ *tag = (version1 << 4) | version2;
+ tag++;
+ }
+
+ return 0;
+}
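
The comments in alloc_tag_store() above spell out the sizing arithmetic: each byte of tag storage holds two 4-bit ADI version tags, each tag covers adi_blksize() bytes of the mapped range, so "size" bytes of tag storage span size * 2 * adi_blksize() bytes of address space. The user-space sketch below only replays that arithmetic; the 64-byte block size and the 8 KB sparc64 page size are assumptions for illustration (the real block size comes from the "adp-blksz" machine-description property read in mdesc_adi_init()).

#include <stdio.h>

/* Hypothetical block size; the kernel reads the real value from the
 * "adp-blksz" machine-description property (adi_state.caps.blksz). */
#define ADI_BLKSIZE       64UL
#define PAGE_SIZE         8192UL  /* sparc64 base page size */
#define TAG_STORAGE_PAGES 8UL

int main(void)
{
        /* Address range covered by one page of tag storage:
         * each byte holds two 4-bit tags, each tag covers one ADI block. */
        unsigned long per_page = PAGE_SIZE * 2 * ADI_BLKSIZE;

        /* Range covered by the default TAG_STORAGE_PAGES allocation. */
        unsigned long per_alloc = TAG_STORAGE_PAGES * PAGE_SIZE * 2 * ADI_BLKSIZE;

        /* Inverse direction, as in alloc_tag_store(): how many tag bytes
         * (rounded up to whole pages) does a given address range need? */
        unsigned long range = 4UL << 20;        /* 4 MB of ADI-enabled mappings */
        unsigned long bytes = range / (2 * ADI_BLKSIZE);
        unsigned long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

        printf("one tag page covers    %lu MB\n", per_page >> 20);
        printf("default alloc covers   %lu MB\n", per_alloc >> 20);
        printf("4 MB of mappings need  %lu tag byte(s), %lu page(s)\n",
               bytes, pages);
        return 0;
}
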
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index eefda32b595e..849db20e7165 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* apc - Driver implementation for power management functions
* of Aurora Personality Chip (APC) on SPARCstation-4/5 and
* derivatives.
@@ -12,12 +13,12 @@
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/oplib.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/auxio.h>
#include <asm/apc.h>
#include <asm/processor.h>
@@ -27,7 +28,6 @@
* #define APC_DEBUG_LED
*/
-#define APC_MINOR MISC_DYNAMIC_MINOR
#define APC_OBPNAME "power-management"
#define APC_DEVNAME "apc"
@@ -137,7 +137,7 @@ static const struct file_operations apc_fops = {
.llseek = noop_llseek,
};
-static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
+static struct miscdevice apc_miscdev = { MISC_DYNAMIC_MINOR, APC_DEVNAME, &apc_fops };
static int apc_probe(struct platform_device *op)
{
@@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id apc_match[] = {
+static const struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
@@ -178,7 +178,6 @@ MODULE_DEVICE_TABLE(of, apc_match);
static struct platform_driver apc_driver = {
.driver = {
.name = "apc",
- .owner = THIS_MODULE,
.of_match_table = apc_match,
},
.probe = apc_probe,
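
The apc.c hunks above drop the local APC_MINOR alias and hand MISC_DYNAMIC_MINOR straight to the miscdevice, letting the misc core pick a free minor number at registration time; they also constify the of_device_id table and drop the redundant .owner initializer. A minimal, self-contained sketch of the dynamic-minor pattern follows; the "demo_apc" name and the empty fops are placeholders, not part of the patch.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .llseek = noop_llseek,
};

/* MISC_DYNAMIC_MINOR asks the misc core to allocate a minor number;
 * the device then appears as /dev/demo_apc. */
static struct miscdevice demo_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "demo_apc",
        .fops  = &demo_fops,
};

static int __init demo_init(void)
{
        return misc_register(&demo_miscdev);
}

static void __exit demo_exit(void)
{
        misc_deregister(&demo_miscdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
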
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index f76389a32342..6e660bde48dd 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This program is used to generate definitions needed by
* assembly language modules.
@@ -9,22 +10,24 @@
*
* On sparc, thread_info data is static and TI_XXX offsets are computed by hand.
*/
+#define COMPILE_OFFSETS
#include <linux/sched.h>
+#include <linux/mm_types.h>
// #include <linux/mm.h>
#include <linux/kbuild.h>
#include <asm/hibernate.h>
#ifdef CONFIG_SPARC32
-int sparc32_foo(void)
+static int __used sparc32_foo(void)
{
DEFINE(AOFF_thread_fork_kpsr,
offsetof(struct thread_struct, fork_kpsr));
return 0;
}
#else
-int sparc64_foo(void)
+static int __used sparc64_foo(void)
{
#ifdef CONFIG_HIBERNATION
BLANK();
@@ -43,7 +46,7 @@ int sparc64_foo(void)
}
#endif
-int foo(void)
+static int __used foo(void)
{
BLANK();
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
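
asm-offsets.c is never linked into the kernel: it is compiled to assembly, and the DEFINE()/BLANK() markers from <linux/kbuild.h> are scraped out of that assembly to generate constants (such as AOFF_task_thread) that .S files can use as byte offsets; marking the functions static __used keeps the compiler from warning about or discarding them. Below is a rough user-space illustration of the offsetof() idea behind those generated constants; the structure and field names are invented for the demo.

#include <stddef.h>
#include <stdio.h>

struct demo_task {
        long state;
        long flags;
        struct { long fork_kpsr; } thread;
};

/* asm-offsets-style constant: byte offset of the "thread" member,
 * computed at compile time with offsetof(). */
#define AOFF_demo_thread offsetof(struct demo_task, thread)

int main(void)
{
        struct demo_task t = { .thread = { .fork_kpsr = 0xabc } };
        char *base = (char *)&t;

        /* Assembly code would add the generated offset to a base register;
         * here the same pointer arithmetic is done in C. */
        long *kpsr = (long *)(base + AOFF_demo_thread);

        printf("offset = %zu, fork_kpsr = 0x%lx\n",
               (size_t)AOFF_demo_thread, *kpsr);
        return 0;
}
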
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c
index 8fff0ac63d56..b092274eca79 100644
--- a/arch/sparc/kernel/audit.c
+++ b/arch/sparc/kernel/audit.c
@@ -1,29 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
-static unsigned dir_class[] = {
+#include "kernel.h"
+
+static unsigned int dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
-static unsigned read_class[] = {
+static unsigned int read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
-static unsigned write_class[] = {
+static unsigned int write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
-static unsigned chattr_class[] = {
+static unsigned int chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
-static unsigned signal_class[] = {
+static unsigned int signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
@@ -37,35 +40,31 @@ int audit_classify_arch(int arch)
return 0;
}
-int audit_classify_syscall(int abi, unsigned syscall)
+int audit_classify_syscall(int abi, unsigned int syscall)
{
#ifdef CONFIG_COMPAT
- extern int sparc32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_SPARC)
return sparc32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
default:
- return 0;
+ return AUDITSC_NATIVE;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
- extern __u32 sparc32_dir_class[];
- extern __u32 sparc32_write_class[];
- extern __u32 sparc32_read_class[];
- extern __u32 sparc32_chattr_class[];
- extern __u32 sparc32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
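
The audit class tables above are arrays of syscall numbers pulled in from the asm-generic/audit_*.h headers and terminated with ~0U; audit_register_class() hands each table to the audit core, and the extern declarations that used to live here now presumably come from the local kernel.h header included at the top. A small user-space sketch of scanning such a ~0U-terminated table follows; the syscall numbers and the lookup helper are illustrative only, not the audit core's actual code.

#include <stdio.h>

/* Illustrative syscall numbers; the real tables are generated by
 * including asm-generic/audit_write.h etc. and always end with ~0U. */
static unsigned int demo_write_class[] = {
        4,
        66,
        ~0U
};

/* Walk a ~0U-terminated class table to decide whether a syscall
 * number belongs to the class. */
static int in_class(const unsigned int *class, unsigned int nr)
{
        while (*class != ~0U) {
                if (*class == nr)
                        return 1;
                class++;
        }
        return 0;
}

int main(void)
{
        printf("syscall 4 in write class: %d\n", in_class(demo_write_class, 4));
        printf("syscall 9 in write class: %d\n", in_class(demo_write_class, 9));
        return 0;
}
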
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index e20cc55fb768..989860e890c4 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -7,14 +8,16 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/export.h>
+
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/auxio.h>
#include <asm/string.h> /* memset(), Linux has no bzero() */
#include <asm/cpu_type.h>
+#include "kernel.h"
+
/* Probe and map in the Auxiliary I/O register */
/* auxio_register is not static because it is referenced
@@ -103,7 +106,7 @@ EXPORT_SYMBOL(set_auxio);
/* sun4m power control register (AUXIO2) */
-volatile unsigned char * auxio_power_register = NULL;
+volatile u8 __iomem *auxio_power_register = NULL;
void __init auxio_power_probe(void)
{
@@ -127,8 +130,8 @@ void __init auxio_power_probe(void)
r.flags = regs.which_io & 0xF;
r.start = regs.phys_addr;
r.end = regs.phys_addr + regs.reg_size - 1;
- auxio_power_register = (unsigned char *) of_ioremap(&r, 0,
- regs.reg_size, "auxpower");
+ auxio_power_register =
+ (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
/* Display a quick message on the console. */
if (auxio_power_register)
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 86e55778e4af..2a2800d21325 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -9,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -86,7 +88,6 @@ void auxio_set_lte(int on)
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
- /* FALL-THROUGH */
default:
break;
}
@@ -107,23 +108,22 @@ static int auxio_probe(struct platform_device *dev)
struct device_node *dp = dev->dev.of_node;
unsigned long size;
- if (!strcmp(dp->parent->name, "ebus")) {
+ if (of_node_name_eq(dp->parent, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
- } else if (!strcmp(dp->parent->name, "sbus")) {
+ } else if (of_node_name_eq(dp->parent, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
- printk("auxio: Unknown parent bus type [%s]\n",
- dp->parent->name);
+ printk("auxio: Unknown parent bus type [%pOFn]\n",
+ dp->parent);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
- printk(KERN_INFO "AUXIO: Found device at %s\n",
- dp->full_name);
+ printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
@@ -135,7 +135,6 @@ static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
- .owner = THIS_MODULE,
.of_match_table = auxio_match,
},
};
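
The auxio_64.c hunks (and the central.c ones later in this diff) replace strcmp() on dp->parent->name with of_node_name_eq() and print device-tree nodes through the %pOF / %pOFn printk specifiers instead of dereferencing ->name and ->full_name directly. A minimal probe-style sketch of that pattern is below; the "ebus" check mirrors the code above, while the driver body and messages are placeholders.

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static int demo_probe(struct platform_device *op)
{
        struct device_node *dp = op->dev.of_node;

        /* of_node_name_eq() compares only the node name (no unit address),
         * replacing the old strcmp(dp->parent->name, "ebus") pattern. */
        if (of_node_name_eq(dp->parent, "ebus"))
                pr_info("demo: parent is an ebus bridge\n");

        /* %pOF prints the full path, %pOFn just the node name. */
        pr_info("demo: found device at %pOF (node %pOFn)\n", dp, dp);
        return 0;
}
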
diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c
index 57073e56ba9e..2bf558a0c568 100644
--- a/arch/sparc/kernel/btext.c
+++ b/arch/sparc/kernel/btext.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Procedures for drawing on the screen early on in the boot process.
*
@@ -7,6 +8,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/font.h>
#include <asm/btext.h>
#include <asm/oplib.h>
@@ -19,11 +21,11 @@ static void scrollscreen(void);
#endif
static void draw_byte(unsigned char c, long locX, long locY);
-static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
+static void draw_byte_32(const unsigned char *bits, unsigned int *base, int rb);
+static void draw_byte_16(const unsigned char *bits, unsigned int *base, int rb);
+static void draw_byte_8(const unsigned char *bits, unsigned int *base, int rb);
-#define __force_data __attribute__((__section__(".data")))
+#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
@@ -35,10 +37,6 @@ static int dispDeviceDepth __force_data;
static int dispDeviceRect[4] __force_data;
static unsigned char *dispDeviceBase __force_data;
-#define cmapsz (16*256)
-
-static unsigned char vga_font[cmapsz];
-
static int __init btext_initialize(phandle node)
{
unsigned int width, height, depth, pitch;
@@ -137,7 +135,7 @@ static void scrollscreen(void)
}
#endif /* ndef NO_SCROLL */
-void btext_drawchar(char c)
+static void btext_drawchar(char c)
{
int cline = 0;
#ifdef NO_SCROLL
@@ -193,7 +191,8 @@ static void btext_drawtext(const char *c, unsigned int len)
static void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
- unsigned char *font = &vga_font[((unsigned int)c) * 16];
+ unsigned int font_index = c * 16;
+ const unsigned char *font = font_sun_8x16.data + font_index;
int rb = dispDeviceRowBytes;
switch(dispDeviceDepth) {
@@ -238,7 +237,7 @@ static unsigned int expand_bits_16[4] = {
};
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+static void draw_byte_32(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -259,7 +258,7 @@ static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
}
}
-static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+static void draw_byte_16(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -277,7 +276,7 @@ static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
}
}
-static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+static void draw_byte_8(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
@@ -325,348 +324,3 @@ int __init btext_find_display(void)
}
return ret;
}
-
-static unsigned char vga_font[cmapsz] = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
-0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
-0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
-0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
-0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
-0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
-0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
-0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
-0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
-0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
-0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
-0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
-0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
-0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
-0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
-0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
-0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
-0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
-0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
-0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
-0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
-0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
-0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
-0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
-0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
-0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
-0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
-0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
-0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
-0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
-0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
-0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
-0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
-0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
-0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
-0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
-0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
-0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
-0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
-0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
-0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
-0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
-0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
-0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
-0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
-0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
-0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
-0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
-0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
-0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
-0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
-0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
-0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
-0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
-0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
-0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
-0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
-0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
-0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
-0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
-0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
-0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
-0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
-0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
-0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
-0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
-0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00,
-};
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 052b5a44318f..a1a6485c9183 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
*
* Copyright (C) 1997, 1999, 2008 David S. Miller (davem@davemloft.net)
@@ -9,7 +10,7 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/fhc.h>
@@ -54,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p)
else
return 5;
}
- /* Fallthrough */
+ fallthrough;
default:
return 4;
}
@@ -152,7 +153,6 @@ static struct platform_driver clock_board_driver = {
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
- .owner = THIS_MODULE,
.of_match_table = clock_board_match,
},
};
@@ -168,7 +168,7 @@ static int fhc_probe(struct platform_device *op)
goto out;
}
- if (!strcmp(op->dev.of_node->parent->name, "central"))
+ if (of_node_name_eq(op->dev.of_node->parent, "central"))
p->central = true;
p->pregs = of_ioremap(&op->resource[0], 0,
@@ -257,7 +257,6 @@ static struct platform_driver fhc_driver = {
.probe = fhc_probe,
.driver = {
.name = "fhc",
- .owner = THIS_MODULE,
.of_match_table = fhc_match,
},
};
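
The central.c hunks above replace the old "/* Fallthrough */" comment with the fallthrough; pseudo-keyword, which lets -Wimplicit-fallthrough verify that the fall-through is intentional, and drop ".owner = THIS_MODULE" because the platform-driver core sets the owner itself. Below is a minimal userspace sketch of the annotation, assuming a GCC or Clang toolchain; in the kernel the macro comes from linux/compiler_attributes.h, and the local define is only a stand-in so the example builds on its own.

#include <stdio.h>

#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))	/* stand-in for the kernel macro */
#endif

static void classify(int n)
{
	switch (n) {
	case 2:
		printf("%d is even\n", n);
		fallthrough;	/* deliberate: 2 is also prime */
	case 3:
	case 5:
	case 7:
		printf("%d is prime\n", n);
		break;
	default:
		printf("%d is neither\n", n);
		break;
	}
}

int main(void)
{
	classify(2);
	classify(7);
	classify(9);
	return 0;
}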
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
index 4ee1ad420862..7f3d3d264390 100644
--- a/arch/sparc/kernel/cherrs.S
+++ b/arch/sparc/kernel/cherrs.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* These get patched into the trap table at boot time
* once we know we have a cheetah processor.
*/
@@ -214,8 +215,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
subcc %g1, %g2, %g1 ! Next cacheline
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_dcpe_tl1_fatal:
sethi %hi(1f), %g7
@@ -224,8 +224,7 @@ do_dcpe_tl1_fatal:
mov 0x2, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_dcpe_tl1,.-do_dcpe_tl1
.globl do_icpe_tl1
@@ -259,8 +258,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_icpe_tl1_fatal:
sethi %hi(1f), %g7
@@ -269,8 +267,7 @@ do_icpe_tl1_fatal:
mov 0x3, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_icpe_tl1,.-do_icpe_tl1
.type dcpe_icpe_tl1_common,#function
@@ -456,7 +453,7 @@ __cheetah_log_error:
cmp %g2, 0x63
be c_cee
nop
- ba,pt %xcc, c_deferred
+ ba,a,pt %xcc, c_deferred
.size __cheetah_log_error,.-__cheetah_log_error
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index dbb210d74e21..d4c74d6b2e1b 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* chmc.c: Driver for UltraSPARC-III memory controller.
*
* Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -14,7 +15,8 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/spitfire.h>
#include <asm/chmctrl.h>
#include <asm/cpudata.h>
@@ -28,7 +30,7 @@
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -464,8 +466,8 @@ static int jbusmc_probe(struct platform_device *op)
mc_list_add(&p->list);
- printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
- op->dev.of_node->full_name);
+ printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %pOF\n",
+ op->dev.of_node);
dev_set_drvdata(&op->dev, p);
@@ -747,8 +749,8 @@ static int chmc_probe(struct platform_device *op)
mc_list_add(&p->list);
- printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
- dp->full_name,
+ printk(KERN_INFO PFX "UltraSPARC-III memory controller at %pOF [%s]\n",
+ dp,
(p->layout_size ? "ACTIVE" : "INACTIVE"));
dev_set_drvdata(&op->dev, p);
@@ -786,7 +788,7 @@ static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p)
kfree(p);
}
-static int us3mc_remove(struct platform_device *op)
+static void us3mc_remove(struct platform_device *op)
{
void *p = dev_get_drvdata(&op->dev);
@@ -796,7 +798,6 @@ static int us3mc_remove(struct platform_device *op)
else if (mc_type == MC_TYPE_JBUS)
jbusmc_destroy(op, p);
}
- return 0;
}
static const struct of_device_id us3mc_match[] = {
@@ -810,7 +811,6 @@ MODULE_DEVICE_TABLE(of, us3mc_match);
static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
- .owner = THIS_MODULE,
.of_match_table = us3mc_match,
},
.probe = us3mc_probe,
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c
index d865575b25bf..f1ea0005a729 100644
--- a/arch/sparc/kernel/compat_audit.c
+++ b/arch/sparc/kernel/compat_audit.c
@@ -1,43 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
#define __32bit_syscall_numbers__
+#include <linux/audit_arch.h>
#include <asm/unistd.h>
+#include "kernel.h"
-unsigned sparc32_dir_class[] = {
+unsigned int sparc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
-unsigned sparc32_chattr_class[] = {
+unsigned int sparc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
-unsigned sparc32_write_class[] = {
+unsigned int sparc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
-unsigned sparc32_read_class[] = {
+unsigned int sparc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
-unsigned sparc32_signal_class[] = {
+unsigned int sparc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
-int sparc32_classify_syscall(unsigned syscall)
+int sparc32_classify_syscall(unsigned int syscall)
{
switch(syscall) {
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
default:
- return 1;
+ return AUDITSC_COMPAT;
}
}
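
For reference, the magic return values that sparc32_classify_syscall() used to hand back map directly onto the named classes introduced above. The authoritative enum lives in include/linux/audit_arch.h; the sketch below only restates the mapping visible in this hunk, and the values marked "assumed" are guesses for illustration, not taken from that header.

/* Illustrative reconstruction of the audit class constants; the values for
 * AUDITSC_COMPAT..AUDITSC_EXECVE follow the old magic numbers replaced above,
 * while AUDITSC_NATIVE and AUDITSC_OPENAT2 are assumptions for this sketch.
 */
enum auditsc_class {
	AUDITSC_NATIVE = 0,	/* assumed */
	AUDITSC_COMPAT = 1,	/* was "return 1" (default case) */
	AUDITSC_OPEN = 2,	/* was "return 2" for __NR_open */
	AUDITSC_OPENAT = 3,	/* was "return 3" for __NR_openat */
	AUDITSC_SOCKETCALL = 4,	/* was "return 4" for __NR_socketcall */
	AUDITSC_EXECVE = 5,	/* was "return 5" for __NR_execve */
	AUDITSC_OPENAT2 = 6,	/* new class, value assumed */
};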
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 5c5125895db8..79cd6ccfeac0 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* cpu.c: Dinky routines to look for the kind of Sparc cpu
* we are on.
*
@@ -10,9 +11,9 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -22,6 +23,7 @@
#include <asm/cpudata.h>
#include "kernel.h"
+#include "entry.h"
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
EXPORT_PER_CPU_SYMBOL(__cpu_data);
@@ -493,6 +495,30 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara5";
break;
+ case SUN4V_CHIP_SPARC_M6:
+ sparc_cpu_type = "SPARC-M6";
+ sparc_fpu_type = "SPARC-M6 integrated FPU";
+ sparc_pmu_type = "sparc-m6";
+ break;
+
+ case SUN4V_CHIP_SPARC_M7:
+ sparc_cpu_type = "SPARC-M7";
+ sparc_fpu_type = "SPARC-M7 integrated FPU";
+ sparc_pmu_type = "sparc-m7";
+ break;
+
+ case SUN4V_CHIP_SPARC_M8:
+ sparc_cpu_type = "SPARC-M8";
+ sparc_fpu_type = "SPARC-M8 integrated FPU";
+ sparc_pmu_type = "sparc-m8";
+ break;
+
+ case SUN4V_CHIP_SPARC_SN:
+ sparc_cpu_type = "SPARC-SN";
+ sparc_fpu_type = "SPARC-SN integrated FPU";
+ sparc_pmu_type = "sparc-sn";
+ break;
+
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e4de74c2c9b0..8fcf2d8c6bd2 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* cpumap.c: used for optimizing CPU assignment
*
* Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
@@ -6,7 +7,6 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <asm/cpudata.h>
@@ -50,7 +50,7 @@ struct cpuinfo_tree {
/* Offsets into nodes[] for each level of the tree */
struct cpuinfo_level level[CPUINFO_LVL_MAX];
- struct cpuinfo_node nodes[0];
+ struct cpuinfo_node nodes[] __counted_by(total_nodes);
};
@@ -194,8 +194,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
n = enumerate_cpuinfo_nodes(tmp_level);
- new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
- (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+ new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC);
if (!new_tree)
return NULL;
@@ -327,6 +326,11 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
+ case SUN4V_CHIP_SPARC_M6:
+ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
+ case SUN4V_CHIP_SPARC_SN:
+ case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
default:
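
The cpumap.c change is the usual flexible-array modernization: the nodes[0] pseudo-array becomes a true flexible array member annotated with __counted_by(), and the open-coded size arithmetic becomes struct_size(), which also saturates on overflow. The userspace approximation below uses stand-in struct and field names and plain calloc() arithmetic in place of kzalloc()/struct_size(), just to show the allocation pattern.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
};

struct tree {
	size_t total_nodes;
	struct node nodes[];	/* flexible array member, sized at allocation time */
};

static struct tree *tree_alloc(size_t n)
{
	struct tree *t;

	/* the kernel code does kzalloc(struct_size(t, nodes, n), GFP_ATOMIC) */
	t = calloc(1, sizeof(*t) + n * sizeof(t->nodes[0]));
	if (t)
		t->total_nodes = n;
	return t;
}

int main(void)
{
	struct tree *t = tree_alloc(4);

	if (!t)
		return 1;
	for (size_t i = 0; i < t->total_nodes; i++)
		t->nodes[i].id = (int)i;
	printf("allocated %zu nodes\n", t->total_nodes);
	free(t);
	return 0;
}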
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
index e639880ab864..7d5b774862e7 100644
--- a/arch/sparc/kernel/cpumap.h
+++ b/arch/sparc/kernel/cpumap.h
@@ -1,9 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CPUMAP_H
#define _CPUMAP_H
#ifdef CONFIG_SMP
-extern void cpu_map_rebuild(void);
-extern int map_to_cpu(unsigned int index);
+void cpu_map_rebuild(void);
+int map_to_cpu(unsigned int index);
#define cpu_map_init() cpu_map_rebuild()
#else
#define cpu_map_init() do {} while (0)
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index 3d465e87f7e2..23b6e50d4ada 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* devices.c: Initial scan of the prom device tree for important
* Sparc device nodes which we need to find.
*
@@ -19,8 +20,9 @@
#include <asm/smp.h>
#include <asm/cpudata.h>
#include <asm/cpu_type.h>
+#include <asm/setup.h>
-extern void clock_stop_probe(void); /* tadpole.c */
+#include "kernel.h"
static char *cpu_mid_prop(void)
{
@@ -131,11 +133,6 @@ void __init device_scan(void)
}
#endif /* !CONFIG_SMP */
- {
- extern void auxio_probe(void);
- extern void auxio_power_probe(void);
- auxio_probe();
- auxio_power_probe();
- }
- clock_stop_probe();
+ auxio_probe();
+ auxio_power_probe();
}
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
deleted file mode 100644
index b667aa6f28f6..000000000000
--- a/arch/sparc/kernel/dma.c
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
-
-static int __init dma_init(void)
-{
- dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
- return 0;
-}
-fs_initcall(dma_init);
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 11d460f6f9cc..f7fc6f2af2f2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* ds.c: Domain Services driver for Logical Domains
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
@@ -9,6 +10,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
@@ -31,7 +33,7 @@
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -85,7 +87,7 @@ struct ds_reg_req {
__u64 handle;
__u16 major;
__u16 minor;
- char svc_id[0];
+ char svc_id[];
};
struct ds_reg_ack {
@@ -528,10 +530,8 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
}
}
-static int __cpuinit dr_cpu_configure(struct ds_info *dp,
- struct ds_cap_state *cp,
- u64 req_num,
- cpumask_t *mask)
+static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
+ u64 req_num, cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
@@ -555,7 +555,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
dp->id, cpu);
- err = cpu_up(cpu);
+ err = add_cpu(cpu);
if (err) {
__u32 res = DR_CPU_RES_FAILURE;
__u32 stat = DR_CPU_STAT_UNCONFIGURED;
@@ -611,7 +611,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
dp->id, cpu);
- err = cpu_down(cpu);
+ err = remove_cpu(cpu);
if (err)
dr_cpu_mark(resp, cpu, ncpus,
DR_CPU_RES_FAILURE,
@@ -627,9 +627,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
return 0;
}
-static void __cpuinit dr_cpu_data(struct ds_info *dp,
- struct ds_cap_state *cp,
- void *buf, int len)
+static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
+ int len)
{
struct ds_data *data = buf;
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
@@ -702,12 +701,12 @@ struct ds_var_hdr {
struct ds_var_set_msg {
struct ds_var_hdr hdr;
- char name_and_value[0];
+ char name_and_value[];
};
struct ds_var_delete_msg {
struct ds_var_hdr hdr;
- char name[0];
+ char name[];
};
struct ds_var_resp {
@@ -782,14 +781,17 @@ void ldom_set_var(const char *var, const char *value)
} pkt;
char *base, *p;
int msg_len, loops;
+ size_t var_len, value_len;
- if (strlen(var) + strlen(value) + 2 >
- sizeof(pkt) - sizeof(pkt.header)) {
- printk(KERN_ERR PFX
- "contents length: %zu, which more than max: %lu,"
- "so could not set (%s) variable to (%s).\n",
- strlen(var) + strlen(value) + 2,
- sizeof(pkt) - sizeof(pkt.header), var, value);
+ var_len = strlen(var) + 1;
+ value_len = strlen(value) + 1;
+
+ if (var_len + value_len > sizeof(pkt) - sizeof(pkt.header)) {
+ pr_err(PFX
+ "contents length: %zu, which more than max: %lu,"
+ "so could not set (%s) variable to (%s).\n",
+ var_len + value_len,
+ sizeof(pkt) - sizeof(pkt.header), var, value);
return;
}
@@ -798,10 +800,10 @@ void ldom_set_var(const char *var, const char *value)
pkt.header.data.handle = cp->handle;
pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
base = p = &pkt.header.msg.name_and_value[0];
- strcpy(p, var);
- p += strlen(var) + 1;
- strcpy(p, value);
- p += strlen(value) + 1;
+ strscpy(p, var, var_len);
+ p += var_len;
+ strscpy(p, value, value_len);
+ p += value_len;
msg_len = (sizeof(struct ds_data) +
sizeof(struct ds_var_set_msg) +
@@ -852,9 +854,8 @@ void ldom_reboot(const char *boot_command)
if (boot_command && strlen(boot_command)) {
unsigned long len;
- strcpy(full_boot_str, "boot ");
- strlcpy(full_boot_str + strlen("boot "), boot_command,
- sizeof(full_boot_str + strlen("boot ")));
+ snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
+ boot_command);
len = strlen(full_boot_str);
if (reboot_data_supported) {
@@ -879,7 +880,7 @@ void ldom_power_off(void)
static void ds_conn_reset(struct ds_info *dp)
{
- printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n",
+ printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n",
dp->id, __builtin_return_address(0));
}
@@ -912,7 +913,7 @@ static int register_services(struct ds_info *dp)
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
- strcpy(pbuf.req.svc_id, cp->service_id);
+ strscpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
@@ -991,7 +992,7 @@ struct ds_queue_entry {
struct ds_info *dp;
int req_len;
int __pad;
- u64 req[0];
+ u64 req[];
};
static void process_ds_work(void)
@@ -1204,14 +1205,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
ds_cfg.tx_irq = vdev->tx_irq;
ds_cfg.rx_irq = vdev->rx_irq;
- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
+ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out_free_ds_states;
}
dp->lp = lp;
- err = ldc_bind(lp, "DS");
+ err = ldc_bind(lp);
if (err)
goto out_free_ldc;
@@ -1238,11 +1239,6 @@ out_err:
return err;
}
-static int ds_remove(struct vio_dev *vdev)
-{
- return 0;
-}
-
static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
@@ -1253,7 +1249,6 @@ static const struct vio_device_id ds_match[] = {
static struct vio_driver ds_driver = {
.id_table = ds_match,
.probe = ds_probe,
- .remove = ds_remove,
.name = "ds",
};
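
The most substantive ds.c fix is in ldom_reboot(): the old code passed sizeof(full_boot_str + strlen("boot ")) as the copy limit, which is the size of a pointer rather than the space left in the buffer, so the boot command was silently truncated after a few bytes. The hunk collapses the strcpy()/strlcpy() pair into one bounded snprintf(). A short userspace demonstration of the pitfall follows; the buffer size and command string are made up.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char full_boot_str[256];
	const char *boot_command = "disk1:a vmlinux root=/dev/sda1";	/* made-up command */

	printf("sizeof(full_boot_str)     = %zu\n", sizeof(full_boot_str));	/* 256 */
	printf("sizeof(full_boot_str + 5) = %zu\n", sizeof(full_boot_str + 5));	/* sizeof(char *), 8 on 64-bit */

	/* the replacement: one bounded, formatted copy of prefix + command */
	snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", boot_command);
	printf("%s\n", full_boot_str);
	return 0;
}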
diff --git a/arch/sparc/kernel/dtlb_miss.S b/arch/sparc/kernel/dtlb_miss.S
index 09a6a15a7105..fb9c788437b0 100644
--- a/arch/sparc/kernel/dtlb_miss.S
+++ b/arch/sparc/kernel/dtlb_miss.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* DTLB ** ICACHE line 1: Context 0 check and TSB load */
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
index b2c2c5be281c..9f945771bbd1 100644
--- a/arch/sparc/kernel/dtlb_prot.S
+++ b/arch/sparc/kernel/dtlb_prot.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* dtlb_prot.S: DTLB protection trap strategy.
* This is included directly into the trap table.
@@ -24,13 +25,13 @@
mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
/* PROT ** ICACHE line 2: More real fault processing */
+ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+ srlx %g5, PAGE_SHIFT, %g5
+ sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
- ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
- ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
- nop
- nop
- nop
+ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
+ nop
nop
/* PROT ** ICACHE line 3: Unused... */
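
The dtlb_prot.S hunk loads the tag-access register one line earlier and adds an srlx/sllx pair that, as its own comment says, clears the context-ID bits kept below the page offset, leaving a page-aligned virtual address. In C the shift pair is simply an align-down to a page boundary; the sketch below assumes sparc64's 8K base pages (PAGE_SHIFT = 13) and a made-up tag value.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t tag = 0xfffff80123456789ULL;	/* made-up tag-access value */
	uint64_t a = (tag >> PAGE_SHIFT) << PAGE_SHIFT;	/* the srlx/sllx pair */
	uint64_t b = tag & ~(PAGE_SIZE - 1);		/* equivalent mask form */

	printf("%#llx %#llx %s\n",
	       (unsigned long long)a, (unsigned long long)b,
	       a == b ? "equal" : "different");
	return 0;
}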
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index e306fb08ee5e..264b186478f3 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ebus.c: EBUS DMA library code.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -7,7 +8,6 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index e2a030045089..a3fdee4cd6fa 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
@@ -7,8 +8,10 @@
* Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
@@ -19,7 +22,6 @@
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
@@ -799,27 +801,12 @@ SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last
RESTORE_ALL
.align 4
- .globl sys_nis_syscall
-sys_nis_syscall:
- mov %o7, %l5
- add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
- call c_sys_nis_syscall
- mov %l5, %o7
-
sunos_execv:
.globl sunos_execv
b sys_execve
clr %i2
.align 4
- .globl sys_sparc_pipe
-sys_sparc_pipe:
- mov %o7, %l5
- add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
- call sparc_pipe
- mov %l5, %o7
-
- .align 4
.globl sys_sigstack
sys_sigstack:
mov %o7, %l5
@@ -839,7 +826,7 @@ sys_sigreturn:
nop
call syscall_trace
- nop
+ mov 1, %o1
1:
/* We don't want to muck with user registers like a
@@ -882,14 +869,11 @@ flush_patch_two:
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
WRITE_PAUSE
- mov SIGCHLD, %o0 ! arg0: clone flags
rd %wim, %g5
WRITE_PAUSE
- mov %fp, %o1 ! arg1: usp
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
- add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
- mov 0, %o3
- call sparc_do_fork
+ add %sp, STACKFRAME_SZ, %o0
+ call sparc_fork
mov %l5, %o7
/* Whee, kernel threads! */
@@ -901,19 +885,11 @@ flush_patch_three:
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
WRITE_PAUSE
-
- /* arg0,1: flags,usp -- loaded already */
- cmp %o1, 0x0 ! Is new_usp NULL?
rd %wim, %g5
WRITE_PAUSE
- be,a 1f
- mov %fp, %o1 ! yes, use callers usp
- andn %o1, 7, %o1 ! no, align to 8 bytes
-1:
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
- add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
- mov 0, %o3
- call sparc_do_fork
+ add %sp, STACKFRAME_SZ, %o0
+ call sparc_clone
mov %l5, %o7
/* Whee, real vfork! */
@@ -927,13 +903,9 @@ flush_patch_four:
rd %wim, %g5
WRITE_PAUSE
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
- sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
- mov %fp, %o1
- or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
- sethi %hi(sparc_do_fork), %l1
- mov 0, %o3
- jmpl %l1 + %lo(sparc_do_fork), %g0
- add %sp, STACKFRAME_SZ, %o2
+ sethi %hi(sparc_vfork), %l1
+ jmpl %l1 + %lo(sparc_vfork), %g0
+ add %sp, STACKFRAME_SZ, %o0
.align 4
linux_sparc_ni_syscall:
@@ -948,7 +920,24 @@ linux_syscall_trace:
cmp %o0, 0
bne 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
+ sethi %hi(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
+ or %l7, %lo(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
+ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
+ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
+ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
+ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
+ cmp %g1, NR_syscalls
+ bgeu 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
mov %i0, %o0
+ ld [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
@@ -1005,7 +994,7 @@ do_syscall:
andcc %l5, _TIF_SYSCALL_TRACE, %g0
mov %i4, %o4
bne linux_syscall_trace
- mov %i0, %l5
+ mov %i0, %l6
2:
call %l7
mov %i5, %o5
@@ -1014,16 +1003,15 @@ do_syscall:
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
ret_sys_call:
- ld [%curptr + TI_FLAGS], %l6
+ ld [%curptr + TI_FLAGS], %l5
cmp %o0, -ERESTART_RESTARTBLOCK
ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
set PSR_C, %g2
bgeu 1f
- andcc %l6, _TIF_SYSCALL_TRACE, %g0
+ andcc %l5, _TIF_SYSCALL_TRACE, %g0
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
- clr %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
@@ -1038,7 +1026,6 @@ ret_sys_call:
sub %g0, %o0, %o0
or %g3, %g2, %g3
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
- mov 1, %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
@@ -1190,6 +1177,8 @@ delay_continue:
ret
restore
+EXPORT_SYMBOL(__udelay)
+EXPORT_SYMBOL(__ndelay)
/* Handle a software breakpoint */
/* We have to inform parent that child has stopped */
@@ -1208,20 +1197,18 @@ breakpoint_trap:
RESTORE_ALL
#ifdef CONFIG_KGDB
- .align 4
- .globl kgdb_trap_low
- .type kgdb_trap_low,#function
-kgdb_trap_low:
+ ENTRY(kgdb_trap_low)
rd %wim,%l3
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
+ mov %l7, %o0 ! trap_level
call kgdb_trap
- add %sp, STACKFRAME_SZ, %o0
+ add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
RESTORE_ALL
- .size kgdb_trap_low,.-kgdb_trap_low
+ ENDPROC(kgdb_trap_low)
#endif
.align 4
@@ -1238,7 +1225,7 @@ flush_patch_exception:
kuw_patch1_7win: sll %o3, 6, %o3
/* No matter how much overhead this routine has in the worst
- * case scenerio, it is several times better than taking the
+ * case scenario, it is several times better than taking the
* traps with the old method of just doing flush_user_windows().
*/
kill_user_windows:
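
The main functional change in entry.S is in linux_syscall_trace: after syscall_trace() returns, the syscall number (%g1) and the arguments (%i0-%i5) are reloaded from the saved pt_regs and the number is re-checked against NR_syscalls, because a ptrace or seccomp tracer may have rewritten any of them. The C-level model below mirrors that flow with a stand-in regs structure and a toy three-entry syscall table; none of the types or values are the kernel's.

#include <stdio.h>

#define NR_SYSCALLS 3

struct regs { unsigned long g1, i0, i1; };	/* stand-in: syscall nr + two args */

typedef long (*syscall_fn)(unsigned long, unsigned long);

static long sys_a(unsigned long a, unsigned long b) { return (long)(a + b); }
static long sys_b(unsigned long a, unsigned long b) { return (long)(a * b); }
static long sys_ni(unsigned long a, unsigned long b) { (void)a; (void)b; return -38; /* -ENOSYS */ }

static syscall_fn table[NR_SYSCALLS] = { sys_a, sys_b, sys_ni };

static void tracer(struct regs *r)
{
	r->g1 = 1;	/* the tracer rewrites the syscall number and arguments */
	r->i0 = 6;
	r->i1 = 7;
}

static long dispatch(struct regs *r)
{
	tracer(r);				/* syscall_trace() */
	if (r->g1 >= NR_SYSCALLS)		/* re-check the number after tracing */
		return -38;			/* -ENOSYS */
	return table[r->g1](r->i0, r->i1);	/* re-read args from the saved regs */
}

int main(void)
{
	struct regs r = { .g1 = 0, .i0 = 2, .i1 = 3 };

	printf("%ld\n", dispatch(&r));		/* prints 42 */
	return 0;
}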
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index cc3c5cb47cda..c746c0fd5d6b 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ENTRY_H
#define _ENTRY_H
@@ -6,40 +7,39 @@
#include <linux/init.h>
/* irq */
-extern void handler_irq(int irq, struct pt_regs *regs);
+void handler_irq(int irq, struct pt_regs *regs);
#ifdef CONFIG_SPARC32
/* traps */
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-
-extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
- unsigned long npc,
- unsigned long psr);
-extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
+void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+
+void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void handle_reg_access(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
+void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
-extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
-extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
+void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
/* entry.S */
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
- void *fpqueue, unsigned long *fpqdepth);
-extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ void *fpqueue, unsigned long *fpqdepth);
+void fpload(unsigned long *fpregs, unsigned long *fsr);
#else /* CONFIG_SPARC32 */
@@ -66,124 +66,121 @@ struct pause_patch_entry {
extern struct pause_patch_entry __pause_3insn_patch,
__pause_3insn_patch_end;
-extern void __init per_cpu_patch(void);
-extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
- struct sun4v_1insn_patch_entry *);
-extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
- struct sun4v_2insn_patch_entry *);
-extern void __init sun4v_patch(void);
-extern void __init boot_cpu_id_too_large(int cpu);
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ struct sun4v_1insn_patch_entry *);
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ struct sun4v_2insn_patch_entry *);
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ struct sun4v_2insn_patch_entry *);
extern unsigned int dcache_parity_tl1_occurred;
extern unsigned int icache_parity_tl1_occurred;
-extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
-extern void timer_interrupt(int irq, struct pt_regs *regs);
-
-extern void do_notify_resume(struct pt_regs *regs,
- unsigned long orig_i0,
- unsigned long thread_info_flags);
-
-extern asmlinkage int syscall_trace_enter(struct pt_regs *regs);
-extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
-
-extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
-
-extern void do_fpe_common(struct pt_regs *regs);
-extern void do_fpieee(struct pt_regs *regs);
-extern void do_fpother(struct pt_regs *regs);
-extern void do_tof(struct pt_regs *regs);
-extern void do_div0(struct pt_regs *regs);
-extern void do_illegal_instruction(struct pt_regs *regs);
-extern void mem_address_unaligned(struct pt_regs *regs,
- unsigned long sfar,
- unsigned long sfsr);
-extern void sun4v_do_mna(struct pt_regs *regs,
- unsigned long addr,
- unsigned long type_ctx);
-extern void do_privop(struct pt_regs *regs);
-extern void do_privact(struct pt_regs *regs);
-extern void do_cee(struct pt_regs *regs);
-extern void do_cee_tl1(struct pt_regs *regs);
-extern void do_dae_tl1(struct pt_regs *regs);
-extern void do_iae_tl1(struct pt_regs *regs);
-extern void do_div0_tl1(struct pt_regs *regs);
-extern void do_fpdis_tl1(struct pt_regs *regs);
-extern void do_fpieee_tl1(struct pt_regs *regs);
-extern void do_fpother_tl1(struct pt_regs *regs);
-extern void do_ill_tl1(struct pt_regs *regs);
-extern void do_irq_tl1(struct pt_regs *regs);
-extern void do_lddfmna_tl1(struct pt_regs *regs);
-extern void do_stdfmna_tl1(struct pt_regs *regs);
-extern void do_paw(struct pt_regs *regs);
-extern void do_paw_tl1(struct pt_regs *regs);
-extern void do_vaw(struct pt_regs *regs);
-extern void do_vaw_tl1(struct pt_regs *regs);
-extern void do_tof_tl1(struct pt_regs *regs);
-extern void do_getpsr(struct pt_regs *regs);
-
-extern void spitfire_insn_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
-extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
-extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
-extern void spitfire_access_error(struct pt_regs *regs,
- unsigned long status_encoded,
- unsigned long afar);
-
-extern void cheetah_fecc_handler(struct pt_regs *regs,
- unsigned long afsr,
- unsigned long afar);
-extern void cheetah_cee_handler(struct pt_regs *regs,
- unsigned long afsr,
- unsigned long afar);
-extern void cheetah_deferred_handler(struct pt_regs *regs,
- unsigned long afsr,
- unsigned long afar);
-extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
-
-extern void sun4v_insn_access_exception(struct pt_regs *regs,
- unsigned long addr,
- unsigned long type_ctx);
-extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
- unsigned long addr,
- unsigned long type_ctx);
-extern void sun4v_data_access_exception(struct pt_regs *regs,
- unsigned long addr,
- unsigned long type_ctx);
-extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
- unsigned long addr,
- unsigned long type_ctx);
-extern void sun4v_resum_error(struct pt_regs *regs,
- unsigned long offset);
-extern void sun4v_resum_overflow(struct pt_regs *regs);
-extern void sun4v_nonresum_error(struct pt_regs *regs,
- unsigned long offset);
-extern void sun4v_nonresum_overflow(struct pt_regs *regs);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+void timer_interrupt(int irq, struct pt_regs *regs);
+
+void do_notify_resume(struct pt_regs *regs,
+ unsigned long orig_i0,
+ unsigned long thread_info_flags);
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+
+void bad_trap_tl1(struct pt_regs *regs, long lvl);
+
+void do_fpieee(struct pt_regs *regs);
+void do_fpother(struct pt_regs *regs);
+void do_tof(struct pt_regs *regs);
+void do_div0(struct pt_regs *regs);
+void do_illegal_instruction(struct pt_regs *regs);
+void mem_address_unaligned(struct pt_regs *regs,
+ unsigned long sfar,
+ unsigned long sfsr);
+void sun4v_do_mna(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+void do_privop(struct pt_regs *regs);
+void do_privact(struct pt_regs *regs);
+void do_cee(struct pt_regs *regs);
+void do_div0_tl1(struct pt_regs *regs);
+void do_fpieee_tl1(struct pt_regs *regs);
+void do_fpother_tl1(struct pt_regs *regs);
+void do_ill_tl1(struct pt_regs *regs);
+void do_irq_tl1(struct pt_regs *regs);
+void do_lddfmna_tl1(struct pt_regs *regs);
+void do_stdfmna_tl1(struct pt_regs *regs);
+void do_paw(struct pt_regs *regs);
+void do_paw_tl1(struct pt_regs *regs);
+void do_vaw(struct pt_regs *regs);
+void do_vaw_tl1(struct pt_regs *regs);
+void do_tof_tl1(struct pt_regs *regs);
+void do_getpsr(struct pt_regs *regs);
+
+void spitfire_insn_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+void spitfire_data_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+void spitfire_data_access_exception_tl1(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+void spitfire_access_error(struct pt_regs *regs,
+ unsigned long status_encoded,
+ unsigned long afar);
+
+void cheetah_fecc_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+void cheetah_cee_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+void cheetah_deferred_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+void cheetah_plus_parity_error(int type, struct pt_regs *regs);
+
+void sun4v_insn_access_exception(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+void sun4v_data_access_exception(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+void sun4v_data_access_exception_tl1(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+void sun4v_resum_error(struct pt_regs *regs,
+ unsigned long offset);
+void sun4v_resum_overflow(struct pt_regs *regs);
+void sun4v_nonresum_error(struct pt_regs *regs,
+ unsigned long offset);
+void sun4v_nonresum_overflow(struct pt_regs *regs);
+void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long context);
extern unsigned long sun4v_err_itlb_vaddr;
extern unsigned long sun4v_err_itlb_ctx;
extern unsigned long sun4v_err_itlb_pte;
extern unsigned long sun4v_err_itlb_error;
-extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
extern unsigned long sun4v_err_dtlb_vaddr;
extern unsigned long sun4v_err_dtlb_ctx;
extern unsigned long sun4v_err_dtlb_pte;
extern unsigned long sun4v_err_dtlb_error;
-extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
-extern void hypervisor_tlbop_error(unsigned long err,
- unsigned long op);
-extern void hypervisor_tlbop_error_xcall(unsigned long err,
- unsigned long op);
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
+void hypervisor_tlbop_error(unsigned long err,
+ unsigned long op);
+void hypervisor_tlbop_error_xcall(unsigned long err,
+ unsigned long op);
/* WARNING: The error trap handlers in assembly know the precise
* layout of the following structure.
@@ -249,8 +246,8 @@ struct ino_bucket {
extern struct ino_bucket *ivector_table;
extern unsigned long ivector_table_pa;
-extern void init_irqwork_curcpu(void);
-extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
+void init_irqwork_curcpu(void);
+void sun4v_register_mondo_queues(int this_cpu);
#endif /* CONFIG_SPARC32 */
#endif /* _ENTRY_H */
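
Nearly all of the entry.h churn removes a redundant keyword: on a function declaration, extern changes nothing, so the old and new prototypes are equivalent, while object declarations such as sun4v_err_itlb_vaddr keep their extern because it does matter for variables. A two-line illustration with a made-up name:

struct pt_regs;					/* forward declaration, for the sketch only */
extern void handle_foo_trap(struct pt_regs *regs);	/* old style */
void handle_foo_trap(struct pt_regs *regs);		/* new style: the same declaration */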
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index e3e80d65e39a..9f243f918619 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* etrap.S: Sparc trap window preparation for entry into the
* Linux kernel.
diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S
index 1276ca2567ba..08cc41f64725 100644
--- a/arch/sparc/kernel/etrap_64.S
+++ b/arch/sparc/kernel/etrap_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
@@ -38,7 +39,11 @@ etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1)
or %g1, %g3, %g1
bne,pn %xcc, 1f
sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
- wrpr %g0, 7, %cleanwin
+661: wrpr %g0, 7, %cleanwin
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ .word 0x85880000 ! allclean
+ .previous
sethi %hi(TASK_REGOFF), %g2
sethi %hi(TSTATE_PEF), %g3
@@ -88,16 +93,30 @@ etrap_save: save %g2, -STACK_BIAS, %sp
bne,pn %xcc, 3f
mov PRIMARY_CONTEXT, %l4
- rdpr %canrestore, %g3
+661: rdpr %canrestore, %g3
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ nop
+ .previous
+
rdpr %wstate, %g2
- wrpr %g0, 0, %canrestore
+661: wrpr %g0, 0, %canrestore
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ nop
+ .previous
sll %g2, 3, %g2
/* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */
mov 1, %l5
sth %l5, [%l6 + TI_SYS_NOERROR]
- wrpr %g3, 0, %otherwin
+661: wrpr %g3, 0, %otherwin
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ .word 0x87880000 ! otherw
+ .previous
+
wrpr %g2, 0, %wstate
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
@@ -132,7 +151,32 @@ etrap_save: save %g2, -STACK_BIAS, %sp
stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
or %l7, %l0, %l7
- sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0
+661: sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0
+ /* If userspace is using ADI, it could potentially pass
+ * a pointer with version tag embedded in it. To maintain
+ * the ADI security, we must enable PSTATE.mcde. Userspace
+ * would have already set TTE.mcd in an earlier call to
+ * kernel and set the version tag for the address being
+ * dereferenced. Setting PSTATE.mcde would ensure any
+ * access to userspace data through a system call honors
+ * ADI and does not allow a rogue app to bypass ADI by
+ * using system calls. Setting PSTATE.mcde only affects
+ * accesses to virtual addresses that have TTE.mcd set.
+ * Set PMCDPER to ensure any exceptions caused by ADI
+ * version tag mismatch are exposed before system call
+ * returns to userspace. Setting PMCDPER affects only
+ * writes to virtual addresses that have TTE.mcd set and
+ * have a version tag set as well.
+ */
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ sethi %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0
+ .previous
+661: nop
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ .word 0xaf902001 /* wrpr %g0, 1, %pmcdper */
+ .previous
or %l7, %l0, %l7
wrpr %l2, %tnpc
wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
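
The 661: labels and the .fast_win_ctrl_1insn_patch / .sun_m7_1insn_patch sections in the etrap_64.S hunk build boot-time patch tables: each entry pairs the address of the labeled instruction with a replacement word (for example 0x85880000, the "allclean" encoding noted in the hunk), and once the matching CPU feature is detected the kernel rewrites the instruction in place and flushes the instruction cache; entry.h above declares sun4v_patch_1insn_range() and sun_m7_patch_2insn_range() for this. The userspace sketch below models only the table walk, on an array of fake instruction words, and simplifies the entry layout to a pointer so it runs outside the kernel.

#include <stdio.h>

struct insn_patch_entry {
	unsigned int *addr;	/* simplified: the kernel records the 661: address itself */
	unsigned int insn;	/* replacement instruction word */
};

/* three fake "nop" words standing in for patchable kernel text */
static unsigned int text[3] = { 0x01000000, 0x01000000, 0x01000000 };

static void apply_1insn_patches(struct insn_patch_entry *start,
				struct insn_patch_entry *end)
{
	for (; start < end; start++) {
		*start->addr = start->insn;
		/* the kernel also issues a "flush" here so the I-cache sees the new word */
	}
}

int main(void)
{
	struct insn_patch_entry patches[] = {
		{ &text[1], 0x85880000 },	/* the "allclean" word from the hunk above */
	};

	apply_1insn_patches(patches, patches + 1);
	printf("%08x %08x %08x\n", text[0], text[1], text[2]);
	return 0;
}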
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
index a6864826a4bd..051659e29c7a 100644
--- a/arch/sparc/kernel/fpu_traps.S
+++ b/arch/sparc/kernel/fpu_traps.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* This is trivial with the new code... */
.globl do_fpdis
.type do_fpdis,#function
@@ -100,8 +101,8 @@ do_fpdis:
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- b,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
2: andcc %g5, FPRS_DU, %g0
bne,pt %icc, 3f
fzero %f32
@@ -144,8 +145,8 @@ do_fpdis:
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- ba,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
@@ -197,8 +198,7 @@ fpdis_exit2:
fp_other_bounce:
call do_fpother
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size fp_other_bounce,.-fp_other_bounce
.align 32
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 03ab022e51c5..eaead3da8e03 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
@@ -81,15 +82,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
-
-int __init ftrace_dyn_arch_init(void *data)
-{
- unsigned long *p = data;
-
- *p = 0;
-
- return 0;
-}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -129,22 +121,12 @@ unsigned long prepare_ftrace_return(unsigned long parent,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
- if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
- frame_pointer) == -EBUSY)
- return parent + 8UL;
-
- trace.func = self_addr;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
return parent + 8UL;
- }
return return_hooker;
}
diff --git a/arch/sparc/kernel/getsetcc.S b/arch/sparc/kernel/getsetcc.S
index a14d272d2061..181e09fd1c55 100644
--- a/arch/sparc/kernel/getsetcc.S
+++ b/arch/sparc/kernel/getsetcc.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
.globl getcc
.type getcc,#function
getcc:
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 3d92c0a8f6c4..38345460d542 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* head.S: The initial boot code for the Sparc port of Linux.
*
@@ -10,6 +11,7 @@
* CompactPCI platform by Eric Brower, 1999.
*/
+#include <linux/export.h>
#include <linux/version.h>
#include <linux/init.h>
@@ -23,7 +25,7 @@
#include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
-#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
+#include <asm/pgtable.h> /* PGDIR_SHIFT */
.data
/* The following are used with the prom_vector node-ops to figure out
@@ -60,6 +62,7 @@ sun4e_notsup:
*/
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
.global root_flags
.global ram_flags
@@ -115,9 +118,12 @@ current_pc:
mov %o7, %g3
tst %o0
- be no_sun4u_here
+ bne 2f
mov %g4, %o7 /* Previous %o7. */
-
+ sethi %hi(no_sun4u_here), %l1
+ jmpl %l1 + %lo(no_sun4u_here), %g0
+ nop
+2:
mov %o0, %l0 ! stash away romvec
mov %o0, %g7 ! put it here too
mov %o1, %l1 ! stash away debug_vec too
@@ -192,7 +198,8 @@ halt_notsup:
sub %o0, %l6, %o0
call %o1
nop
- ba halt_me
+ sethi %hi(halt_me), %o0
+ jmpl %o0 + %lo(halt_me), %g0
nop
not_a_sun4:
@@ -270,7 +277,7 @@ not_a_sun4:
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
- add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
+ add %o1, KERNBASE >> (PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
@@ -314,7 +321,7 @@ srmmu_not_viking:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
@@ -338,7 +345,7 @@ leon_remap:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
@@ -428,8 +435,11 @@ leon_init:
#ifdef CONFIG_SMP
ldub [%g2 + %lo(boot_cpu_id)], %g1
cmp %g1, 0xff ! unset means first CPU
- bne leon_smp_cpu_startup ! continue only with master
+ be 1f
+ sethi %hi(leon_smp_cpu_startup), %g1
+ jmpl %g1 + %lo(leon_smp_cpu_startup), %g0
nop
+1:
#endif
/* Get CPU-ID from most significant 4-bit of ASR17 */
rd %asr17, %g1
@@ -512,7 +522,7 @@ continue_boot:
/* I want a kernel stack NOW! */
set init_thread_union, %g1
- set (THREAD_SIZE - STACKFRAME_SZ), %g2
+ set (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
add %g1, %g2, %sp
mov 0, %fp /* And for good luck */
@@ -807,9 +817,3 @@ lvl14_save:
.word 0
.word 0
.word t_irq14
-
- .section ".fixup",#alloc,#execinstr
- .globl __ret_efault
-__ret_efault:
- ret
- restore %g0, -EFAULT, %o0
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 26b706a1867d..cf0549134234 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
@@ -8,16 +9,17 @@
#include <linux/version.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
@@ -32,7 +34,7 @@
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
-
+
/* This section from from _start to sparc64_boot_end should fit into
* 0x0000000000404000 to 0x0000000000408000.
*/
@@ -95,6 +97,7 @@ sparc64_boot:
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
+ nop
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -143,6 +146,7 @@ prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
+EXPORT_SYMBOL(prom_root_node)
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
@@ -158,6 +162,7 @@ is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
+EXPORT_SYMBOL(sun4v_chip_type)
1:
rd %pc, %l0
@@ -282,8 +287,8 @@ sun4v_chip_type:
stx %l2, [%l4 + 0x0]
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
/* 4MB align */
- srlx %l3, 22, %l3
- sllx %l3, 22, %l3
+ srlx %l3, ILOG2_4MB, %l3
+ sllx %l3, ILOG2_4MB, %l3
stx %l3, [%l4 + 0x8]
/* Leave service as-is, "call-method" */
@@ -414,29 +419,43 @@ sun4v_chip_type:
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
+ be,pt %xcc, 70f
+ cmp %g2, 'S'
bne,pn %xcc, 49f
nop
70: ldub [%g1 + 7], %g2
- cmp %g2, '3'
+ cmp %g2, CPU_ID_NIAGARA3
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA3, %g4
- cmp %g2, '4'
+ cmp %g2, CPU_ID_NIAGARA4
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA4, %g4
- cmp %g2, '5'
+ cmp %g2, CPU_ID_NIAGARA5
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
+ cmp %g2, CPU_ID_M6
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M6, %g4
+ cmp %g2, CPU_ID_M7
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M7, %g4
+ cmp %g2, CPU_ID_M8
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M8, %g4
+ cmp %g2, CPU_ID_SONOMA1
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_SN, %g4
ba,pt %xcc, 49f
nop
91: sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 17], %g2
- cmp %g2, '1'
+ cmp %g2, CPU_ID_NIAGARA1
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA1, %g4
- cmp %g2, '2'
+ cmp %g2, CPU_ID_NIAGARA2
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA2, %g4
@@ -455,9 +474,8 @@ sun4v_chip_type:
subcc %g3, 1, %g3
bne,pt %xcc, 41b
add %g1, 1, %g1
- mov SUN4V_CHIP_SPARC64X, %g4
ba,pt %xcc, 5f
- nop
+ mov SUN4V_CHIP_SPARC64X, %g4
49:
mov SUN4V_CHIP_UNKNOWN, %g4
@@ -542,8 +560,7 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- ba,pt %xcc, sun4u_continue
- nop
+ ba,a,pt %xcc, sun4u_continue
sun4v_init:
/* Set ctx 0 */
@@ -554,14 +571,12 @@ sun4v_init:
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
- ba,pt %xcc, niagara_tlb_fixup
- nop
+ ba,a,pt %xcc, niagara_tlb_fixup
sun4u_continue:
BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
- ba,pt %xcc, spitfire_tlb_fixup
- nop
+ ba,a,pt %xcc, spitfire_tlb_fixup
niagara_tlb_fixup:
mov 3, %g2 /* Set TLB type to hypervisor. */
@@ -585,6 +600,18 @@ niagara_tlb_fixup:
cmp %g1, SUN4V_CHIP_NIAGARA5
be,pt %xcc, niagara4_patch
nop
+ cmp %g1, SUN4V_CHIP_SPARC_M6
+ be,pt %xcc, niagara4_patch
+ nop
+ cmp %g1, SUN4V_CHIP_SPARC_M7
+ be,pt %xcc, sparc_m7_patch
+ nop
+ cmp %g1, SUN4V_CHIP_SPARC_M8
+ be,pt %xcc, sparc_m7_patch
+ nop
+ cmp %g1, SUN4V_CHIP_SPARC_SN
+ be,pt %xcc, niagara4_patch
+ nop
call generic_patch_copyops
nop
@@ -594,6 +621,19 @@ niagara_tlb_fixup:
nop
ba,a,pt %xcc, 80f
+ nop
+
+sparc_m7_patch:
+ call m7_patch_copyops
+ nop
+ call m7_patch_bzero
+ nop
+ call m7_patch_pageops
+ nop
+
+ ba,a,pt %xcc, 80f
+ nop
+
niagara4_patch:
call niagara4_patch_copyops
nop
@@ -601,8 +641,11 @@ niagara4_patch:
nop
call niagara4_patch_pageops
nop
+ call niagara4_patch_fls
+ nop
ba,a,pt %xcc, 80f
+ nop
niagara2_patch:
call niagara2_patch_copyops
@@ -613,6 +656,7 @@ niagara2_patch:
nop
ba,a,pt %xcc, 80f
+ nop
niagara_patch:
call niagara_patch_copyops
@@ -627,8 +671,7 @@ niagara_patch:
call hypervisor_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
cheetah_tlb_fixup:
mov 2, %g2 /* Set TLB type to cheetah+. */
@@ -647,8 +690,7 @@ cheetah_tlb_fixup:
call cheetah_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
spitfire_tlb_fixup:
/* Set TLB type to spitfire. */
@@ -660,14 +702,12 @@ tlb_fixup_done:
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
ldx [%g6 + TI_TASK], %g4
- mov %sp, %l6
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
- sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
add %g6, %g1, %sp
- mov 0, %fp
/* Set per-cpu pointer initially to zero, this makes
* the boot-cpu use the in-kernel-image per-cpu areas
@@ -686,52 +726,14 @@ tlb_fixup_done:
call __bzero
sub %o1, %o0, %o1
-#ifdef CONFIG_LOCKDEP
- /* We have this call this super early, as even prom_init can grab
- * spinlocks and thus call into the lockdep code.
- */
- call lockdep_init
- nop
-#endif
-
- mov %l6, %o1 ! OpenPROM stack
call prom_init
mov %l7, %o0 ! OpenPROM cif handler
- /* Initialize current_thread_info()->cpu as early as possible.
- * In order to do that accurately we have to patch up the get_cpuid()
- * assembler sequences. And that, in turn, requires that we know
- * if we are on a Starfire box or not. While we're here, patch up
- * the sun4v sequences as well.
+ /* To create a one-register-window buffer between the kernel's
+ * initial stack and the last stack frame we use from the firmware,
+ * do the rest of the boot from a C helper function.
*/
- call check_if_starfire
- nop
- call per_cpu_patch
- nop
- call sun4v_patch
- nop
-
-#ifdef CONFIG_SMP
- call hard_smp_processor_id
- nop
- cmp %o0, NR_CPUS
- blu,pt %xcc, 1f
- nop
- call boot_cpu_id_too_large
- nop
- /* Not reached... */
-
-1:
-#else
- mov 0, %o0
-#endif
- sth %o0, [%g6 + TI_CPU]
-
- call prom_init_report
- nop
-
- /* Off we go.... */
- call start_kernel
+ call start_early_boot
nop
/* Not reached... */
@@ -802,8 +804,7 @@ setup_trap_table:
call %o1
add %sp, (2047 + 128), %o0
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
1: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
@@ -842,8 +843,7 @@ setup_trap_table:
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
/* Disable STICK_INT interrupts. */
1:
@@ -896,8 +896,8 @@ sparc64_boot_end:
#include "misctrap.S"
#include "syscalls.S"
#include "helpers.S"
-#include "hvcalls.S"
#include "sun4v_tlb_miss.S"
+#include "sun4v_mcd.S"
#include "sun4v_ivec.S"
#include "ktlb.S"
#include "tsb.S"
@@ -941,6 +941,7 @@ swapper_4m_tsb:
! 0x0000000000428000
+#include "hvcalls.S"
#include "systbls_64.S"
.data
@@ -948,37 +949,21 @@ swapper_4m_tsb:
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
+EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault, __ret_one, __retl_one
-ENTRY(__ret_efault)
- ret
- restore %g0, -EFAULT, %o0
-ENDPROC(__ret_efault)
-
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
- retl
- mov 1, %o0
-ENDPROC(__retl_one)
-
-ENTRY(__ret_one_asi)
- wr %g0, ASI_AIUS, %asi
- ret
- restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
-
-ENTRY(__retl_one_asi)
- wr %g0, ASI_AIUS, %asi
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi)
-
ENTRY(__retl_o1)
retl
mov %o1, %o0
ENDPROC(__retl_o1)
+
+ENTRY(__retl_o1_asi)
+ wr %o5, 0x0, %asi
+ retl
+ mov %o1, %o0
+ENDPROC(__retl_o1_asi)
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 314dd0c9fc5b..9b3f74706cfb 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
.align 32
.globl __flushw_user
.type __flushw_user,#function
@@ -15,6 +16,7 @@ __flushw_user:
2: retl
nop
.size __flushw_user,.-__flushw_user
+EXPORT_SYMBOL(__flushw_user)
/* Flush %fp and %i7 to the stack for all register
* windows active inside of the cpu. This allows
@@ -61,3 +63,4 @@ real_hard_smp_processor_id:
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index c0a2de0fd624..717ec7ef07f9 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* hvapi.c: Hypervisor API management.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
@@ -39,6 +40,8 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_SDIO, },
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
+ { .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_DAX, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
@@ -46,7 +49,9 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_KT_CPU, },
{ .group = HV_GRP_VT_CPU, },
+ { .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);
@@ -186,7 +191,7 @@ void __init sun4v_hvapi_init(void)
group = HV_GRP_CORE;
major = 1;
- minor = 1;
+ minor = 6;
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index f3ab509b76a8..2f865a464576 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* %o0: devhandle
* %o1: devino
*
@@ -106,6 +107,17 @@ ENTRY(sun4v_cpu_yield)
nop
ENDPROC(sun4v_cpu_yield)
+ /* %o0: cpuid
+ *
+ * returns %o0: status
+ */
+ENTRY(sun4v_cpu_poke)
+ mov HV_FAST_CPU_POKE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_cpu_poke)
+
/* %o0: type
* %o1: queue paddr
* %o2: num queue entries
@@ -338,10 +350,12 @@ ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP
+ brnz,a,pn %o4, 0f
stx %o1, [%o4]
- retl
+0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
+EXPORT_SYMBOL(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
@@ -775,6 +789,7 @@ ENTRY(sun4v_niagara_getperf)
retl
nop
ENDPROC(sun4v_niagara_getperf)
+EXPORT_SYMBOL(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
@@ -782,6 +797,7 @@ ENTRY(sun4v_niagara_setperf)
retl
nop
ENDPROC(sun4v_niagara_setperf)
+EXPORT_SYMBOL(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
@@ -791,6 +807,7 @@ ENTRY(sun4v_niagara2_getperf)
retl
nop
ENDPROC(sun4v_niagara2_getperf)
+EXPORT_SYMBOL(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
@@ -798,6 +815,7 @@ ENTRY(sun4v_niagara2_setperf)
retl
nop
ENDPROC(sun4v_niagara2_setperf)
+EXPORT_SYMBOL(sun4v_niagara2_setperf)
ENTRY(sun4v_reboot_data_set)
mov HV_FAST_REBOOT_DATA_SET, %o5
@@ -821,3 +839,92 @@ ENTRY(sun4v_vt_set_perfreg)
retl
nop
ENDPROC(sun4v_vt_set_perfreg)
+
+ENTRY(sun4v_t5_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_T5_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_t5_get_perfreg)
+
+ENTRY(sun4v_t5_set_perfreg)
+ mov HV_FAST_T5_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_M7_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+ mov HV_FAST_M7_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_m7_set_perfreg)
+
+ /* %o0: address of CCB array
+ * %o1: size (in bytes) of CCB array
+ * %o2: flags
+ * %o3: reserved
+ *
+ * returns:
+ * %o0: status
+ * %o1: size (in bytes) of the CCB array that was accepted
+ * %o2: status data
+ * %o3: reserved
+ */
+ENTRY(sun4v_ccb_submit)
+ mov %o5, %g1
+ mov HV_CCB_SUBMIT, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ stx %o2, [%g1]
+ENDPROC(sun4v_ccb_submit)
+EXPORT_SYMBOL(sun4v_ccb_submit)
+
+ /* %o0: completion area ra for the ccb to get info
+ *
+ * returns:
+ * %o0: status
+ * %o1: CCB state
+ * %o2: position
+ * %o3: dax unit
+ * %o4: queue
+ */
+ENTRY(sun4v_ccb_info)
+ mov %o1, %g1
+ mov HV_CCB_INFO, %o5
+ ta HV_FAST_TRAP
+ sth %o1, [%g1 + CCB_INFO_OFFSET_CCB_STATE]
+ sth %o2, [%g1 + CCB_INFO_OFFSET_QUEUE_POS]
+ sth %o3, [%g1 + CCB_INFO_OFFSET_DAX_UNIT]
+ retl
+ sth %o4, [%g1 + CCB_INFO_OFFSET_QUEUE_NUM]
+ENDPROC(sun4v_ccb_info)
+EXPORT_SYMBOL(sun4v_ccb_info)
+
+ /* %o0: completion area ra for the ccb to kill
+ *
+ * returns:
+ * %o0: status
+ * %o1: result of the kill
+ */
+ENTRY(sun4v_ccb_kill)
+ mov %o1, %g1
+ mov HV_CCB_KILL, %o5
+ ta HV_FAST_TRAP
+ retl
+ sth %o1, [%g1]
+ENDPROC(sun4v_ccb_kill)
+EXPORT_SYMBOL(sun4v_ccb_kill)
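The comments above spell out the hypervisor calling convention for the new CCB entry points: sun4v_ccb_submit returns the accepted length through the pointer passed in %o4 and the status data through the pointer originally held in %o5, sun4v_ccb_info fills four 16-bit fields at the address passed in %o1, and sun4v_ccb_kill stores the kill result through the pointer in %o1. A minimal C-level sketch of driving the submit stub is shown below; the prototype is inferred from the register usage in this hunk rather than copied from asm/hypervisor.h, so treat it as an assumption.

#include <linux/errno.h>

/* Sketch only: prototype inferred from the %o-register convention documented
 * above; the authoritative declaration lives in arch/sparc/include/asm/hypervisor.h.
 */
unsigned long sun4v_ccb_submit(unsigned long ccb_ra, unsigned long len,
			       unsigned long flags, unsigned long reserved,
			       unsigned long *submitted_len,
			       unsigned long *status_data);

static int submit_ccb_array(unsigned long ccb_ra, unsigned long len)
{
	unsigned long accepted = 0, status_data = 0, hv_err;

	hv_err = sun4v_ccb_submit(ccb_ra, len, 0, 0, &accepted, &status_data);
	if (hv_err)			/* non-zero means not HV_EOK */
		return -EIO;

	/* The hypervisor may accept fewer CCB bytes than were handed in. */
	return accepted == len ? 0 : -EAGAIN;
}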
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 605c960b2fa6..f39220471b65 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* hvtramp.S: Hypervisor start-cpu trampoline code.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
-#include <linux/init.h>
#include <asm/thread_info.h>
#include <asm/hypervisor.h>
@@ -16,7 +16,6 @@
#include <asm/asi.h>
#include <asm/pil.h>
- __CPUINIT
.align 8
.globl hv_cpu_startup, hv_cpu_startup_end
@@ -111,7 +110,6 @@ hv_cpu_startup:
sllx %g5, THREAD_SHIFT, %g5
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
- mov 0, %fp
call init_irqwork_curcpu
nop
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index 6bd75012109d..d6c46d512220 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* idprom.c: Routines to load the idprom into kernel addresses and
* interpret the data contained within.
@@ -9,6 +10,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
@@ -60,6 +62,12 @@ static void __init display_system_type(unsigned char machtype)
{
}
#endif
+
+unsigned char *arch_get_platform_mac_address(void)
+{
+ return idprom->id_ethaddr;
+}
+
/* Calculate the IDPROM checksum (xor of the data bytes). */
static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
{
diff --git a/arch/sparc/kernel/iommu-common.c b/arch/sparc/kernel/iommu-common.c
new file mode 100644
index 000000000000..23ca75f09277
--- /dev/null
+++ b/arch/sparc/kernel/iommu-common.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU mmap management and range allocation functions.
+ * Based almost entirely upon the powerpc iommu allocator.
+ */
+
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/hash.h>
+#include <asm/iommu-common.h>
+
+static unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
+
+static inline bool need_flush(struct iommu_map_table *iommu)
+{
+ return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
+}
+
+static inline void set_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags |= IOMMU_NEED_FLUSH;
+}
+
+static inline void clear_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags &= ~IOMMU_NEED_FLUSH;
+}
+
+static void setup_iommu_pool_hash(void)
+{
+ unsigned int i;
+ static bool do_once;
+
+ if (do_once)
+ return;
+ do_once = true;
+ for_each_possible_cpu(i)
+ per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+}
+
+/*
+ * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
+ * is the number of table entries. If `large_pool' is set to true,
+ * the top 1/4 of the table will be set aside for pool allocations
+ * of more than iommu_large_alloc pages.
+ */
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check)
+{
+ unsigned int start, i;
+ struct iommu_pool *p = &(iommu->large_pool);
+
+ setup_iommu_pool_hash();
+ if (npools == 0)
+ iommu->nr_pools = IOMMU_NR_POOLS;
+ else
+ iommu->nr_pools = npools;
+ BUG_ON(npools > IOMMU_NR_POOLS);
+
+ iommu->table_shift = table_shift;
+ iommu->lazy_flush = lazy_flush;
+ start = 0;
+ if (skip_span_boundary_check)
+ iommu->flags |= IOMMU_NO_SPAN_BOUND;
+ if (large_pool)
+ iommu->flags |= IOMMU_HAS_LARGE_POOL;
+
+ if (!large_pool)
+ iommu->poolsize = num_entries/iommu->nr_pools;
+ else
+ iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
+ for (i = 0; i < iommu->nr_pools; i++) {
+ spin_lock_init(&(iommu->pools[i].lock));
+ iommu->pools[i].start = start;
+ iommu->pools[i].hint = start;
+ start += iommu->poolsize; /* start for next pool */
+ iommu->pools[i].end = start - 1;
+ }
+ if (!large_pool)
+ return;
+ /* initialize large_pool */
+ spin_lock_init(&(p->lock));
+ p->start = start;
+ p->hint = p->start;
+ p->end = num_entries;
+}
+
+unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order)
+{
+ unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
+ unsigned long n, end, start, limit, boundary_size;
+ struct iommu_pool *pool;
+ int pass = 0;
+ unsigned int pool_nr;
+ unsigned int npools = iommu->nr_pools;
+ unsigned long flags;
+ bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
+ bool largealloc = (large_pool && npages > iommu_large_alloc);
+ unsigned long shift;
+ unsigned long align_mask = 0;
+
+ if (align_order > 0)
+ align_mask = ~0ul >> (BITS_PER_LONG - align_order);
+
+ /* Sanity check */
+ if (unlikely(npages == 0)) {
+ WARN_ON_ONCE(1);
+ return IOMMU_ERROR_CODE;
+ }
+
+ if (largealloc) {
+ pool = &(iommu->large_pool);
+ pool_nr = 0; /* to keep compiler happy */
+ } else {
+ /* pick out pool_nr */
+ pool_nr = pool_hash & (npools - 1);
+ pool = &(iommu->pools[pool_nr]);
+ }
+ spin_lock_irqsave(&pool->lock, flags);
+
+ again:
+ if (pass == 0 && handle && *handle &&
+ (*handle >= pool->start) && (*handle < pool->end))
+ start = *handle;
+ else
+ start = pool->hint;
+
+ limit = pool->end;
+
+ /* The case below can happen if we have a small segment appended
+ * to a large, or when the previous alloc was at the very end of
+ * the available space. If so, go back to the beginning. If a
+ * flush is needed, it will get done based on the return value
+ * from iommu_area_alloc() below.
+ */
+ if (start >= limit)
+ start = pool->start;
+ shift = iommu->table_map_base >> iommu->table_shift;
+ if (limit + shift > mask) {
+ limit = mask - shift + 1;
+ /* If we're constrained on address range, first try
+ * at the masked hint to avoid O(n) search complexity,
+ * but on second pass, start at 0 in pool 0.
+ */
+ if ((start & mask) >= limit || pass > 0) {
+ spin_unlock(&(pool->lock));
+ pool = &(iommu->pools[0]);
+ spin_lock(&(pool->lock));
+ start = pool->start;
+ } else {
+ start &= mask;
+ }
+ }
+
+ /*
+ * if the skip_span_boundary_check had been set during init, we set
+ * things up so that iommu_is_span_boundary() merely checks if the
+ * (index + npages) < num_tsb_entries
+ */
+ if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
+ shift = 0;
+ boundary_size = iommu->poolsize * iommu->nr_pools;
+ } else {
+ boundary_size = dma_get_seg_boundary_nr_pages(dev,
+ iommu->table_shift);
+ }
+ n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
+ boundary_size, align_mask);
+ if (n == -1) {
+ if (likely(pass == 0)) {
+ /* First failure, rescan from the beginning. */
+ pool->hint = pool->start;
+ set_flush(iommu);
+ pass++;
+ goto again;
+ } else if (!largealloc && pass <= iommu->nr_pools) {
+ spin_unlock(&(pool->lock));
+ pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+ pool = &(iommu->pools[pool_nr]);
+ spin_lock(&(pool->lock));
+ pool->hint = pool->start;
+ set_flush(iommu);
+ pass++;
+ goto again;
+ } else {
+ /* give up */
+ n = IOMMU_ERROR_CODE;
+ goto bail;
+ }
+ }
+ if (iommu->lazy_flush &&
+ (n < pool->hint || need_flush(iommu))) {
+ clear_flush(iommu);
+ iommu->lazy_flush(iommu);
+ }
+
+ end = n + npages;
+ pool->hint = end;
+
+ /* Update handle for SG allocations */
+ if (handle)
+ *handle = end;
+bail:
+ spin_unlock_irqrestore(&(pool->lock), flags);
+
+ return n;
+}
+
+static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
+ unsigned long entry)
+{
+ struct iommu_pool *p;
+ unsigned long largepool_start = tbl->large_pool.start;
+ bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
+
+ /* The large pool is the last pool at the top of the table */
+ if (large_pool && entry >= largepool_start) {
+ p = &tbl->large_pool;
+ } else {
+ unsigned int pool_nr = entry / tbl->poolsize;
+
+ BUG_ON(pool_nr >= tbl->nr_pools);
+ p = &tbl->pools[pool_nr];
+ }
+ return p;
+}
+
+/* Caller supplies the index of the entry into the iommu map table
+ * itself when the mapping from dma_addr to the entry is not the
+ * default addr->entry mapping below.
+ */
+void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
+ unsigned long npages, unsigned long entry)
+{
+ struct iommu_pool *pool;
+ unsigned long flags;
+ unsigned long shift = iommu->table_shift;
+
+ if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
+ entry = (dma_addr - iommu->table_map_base) >> shift;
+ pool = get_pool(iommu, entry);
+
+ spin_lock_irqsave(&(pool->lock), flags);
+ bitmap_clear(iommu->map, entry, npages);
+ spin_unlock_irqrestore(&(pool->lock), flags);
+}
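The iommu.c conversion that follows shows how this allocator is consumed: iommu_table_init() points tbl.map at a zeroed bitmap, sets tbl.table_map_base to the DMA offset, and calls iommu_tbl_pool_init() with IO_PAGE_SHIFT plus the sun4u flush callback, after which mapping and unmapping go through iommu_tbl_range_alloc() and iommu_tbl_range_free(). Stripped of the sun4u details, the pattern looks roughly like the sketch below, which uses only the functions defined above; the example_* names are illustrative.

/* Sketch of the allocator's intended usage; assumes asm/iommu-common.h and
 * linux/dma-mapping.h are in scope. Only the iommu_tbl_* calls come from
 * iommu-common.c.
 */
static void example_table_setup(struct iommu_map_table *tbl,
				unsigned long num_entries,
				unsigned long *bitmap,
				unsigned long dma_offset)
{
	tbl->table_map_base = dma_offset;	/* base DMA address of the table */
	tbl->map = bitmap;			/* caller-allocated, zeroed bitmap */
	/* One pool, no lazy flush callback, no span-boundary skipping. */
	iommu_tbl_pool_init(tbl, num_entries, IO_PAGE_SHIFT, NULL,
			    false, 1, false);
}

static dma_addr_t example_map_pages(struct device *dev,
				    struct iommu_map_table *tbl,
				    unsigned long npages)
{
	unsigned long entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
						    (unsigned long)-1, 0);

	if (entry == IOMMU_ERROR_CODE)
		return DMA_MAPPING_ERROR;
	return tbl->table_map_base + (entry << IO_PAGE_SHIFT);
}

static void example_unmap_pages(struct iommu_map_table *tbl,
				dma_addr_t dma_addr, unsigned long npages)
{
	/* IOMMU_ERROR_CODE selects the default dma_addr -> entry mapping. */
	iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}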
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed141aac7..46ef88bc9c26 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -9,10 +10,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
+#include <asm/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -21,6 +23,7 @@
#include <asm/iommu.h>
#include "iommu_common.h"
+#include "kernel.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -44,8 +47,9 @@
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
+ struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
@@ -86,94 +90,6 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
- * facility it must all be done in one pass while under the iommu lock.
- *
- * On sun4u platforms, we only flush the IOMMU once every time we've passed
- * over the entire page table doing allocations. Therefore we only ever advance
- * the hint and cannot backtrack it.
- */
-unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle)
-{
- unsigned long n, end, start, limit, boundary_size;
- struct iommu_arena *arena = &iommu->arena;
- int pass = 0;
-
- /* This allocator was derived from x86_64's bit string search */
-
- /* Sanity check */
- if (unlikely(npages == 0)) {
- if (printk_ratelimit())
- WARN_ON(1);
- return DMA_ERROR_CODE;
- }
-
- if (handle && *handle)
- start = *handle;
- else
- start = arena->hint;
-
- limit = arena->limit;
-
- /* The case below can happen if we have a small segment appended
- * to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning and flush.
- */
- if (start >= limit) {
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- }
-
- again:
-
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IO_PAGE_SHIFT);
- else
- boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
-
- n = iommu_area_alloc(arena->map, limit, start, npages,
- iommu->page_table_map_base >> IO_PAGE_SHIFT,
- boundary_size >> IO_PAGE_SHIFT, 0);
- if (n == -1) {
- if (likely(pass < 1)) {
- /* First failure, rescan from the beginning. */
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- pass++;
- goto again;
- } else {
- /* Second failure, give up */
- return DMA_ERROR_CODE;
- }
- }
-
- end = n + npages;
-
- arena->hint = end;
-
- /* Update handle for SG allocations */
- if (handle)
- *handle = end;
-
- return n;
-}
-
-void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long entry;
-
- entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-
- bitmap_clear(arena->map, entry, npages);
-}
-
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
@@ -186,22 +102,19 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
- if (!iommu->arena.map) {
- printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
+ iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
+ if (!iommu->tbl.map)
return -ENOMEM;
- }
- memset(iommu->arena.map, 0, sz);
- iommu->arena.limit = num_tsb_entries;
- if (tlb_type != hypervisor)
- iommu->flush_all = iommu_flushall;
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ (tlb_type != hypervisor ? iommu_flushall : NULL),
+ false, 1, false);
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
@@ -234,19 +147,21 @@ out_free_dummy_page:
iommu->dummy_page = 0UL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->tbl.map);
+ iommu->tbl.map = NULL;
return -ENOMEM;
}
-static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+static inline iopte_t *alloc_npages(struct device *dev,
+ struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- if (unlikely(entry == DMA_ERROR_CODE))
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
+ if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
@@ -281,9 +196,9 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- unsigned long flags, order, first_page;
+ unsigned long order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
@@ -305,16 +220,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
- *dma_addrp = (iommu->page_table_map_base +
+ *dma_addrp = (iommu->tbl.table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
@@ -332,50 +245,55 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
- unsigned long flags, order, npages;
+ unsigned long order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t sz,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
+ size_t sz, enum dma_data_direction direction,
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr, ctx;
+ unsigned long i, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ /*
+ * This check is included because older versions of the code
+ * lacked MMIO path support, and my ability to test this path
+ * is limited. However, from a software technical standpoint,
+ * there is no restriction, as the following code operates
+ * solely on physical addresses.
+ */
+ goto bad_no_ctx;
+
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- oaddr = (unsigned long)(page_address(page) + offset);
+ oaddr = (unsigned long)(phys_to_virt(phys));
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(dev, iommu, npages);
+ spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
@@ -384,10 +302,9 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (unlikely(!base))
goto bad;
- bus_addr = (iommu->page_table_map_base +
+ bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -395,8 +312,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
- for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
- iopte_val(*base) = iopte_protection | base_paddr;
+ for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
+ iopte_val(*base) = iopte_protection | phys;
return ret;
@@ -405,7 +322,7 @@ bad:
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return DMA_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
@@ -474,9 +391,9 @@ do_flush_sync:
vaddr, ctx, npages);
}
-static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -495,7 +412,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
@@ -506,7 +423,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
- if (strbuf->strbuf_enabled)
+ if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
@@ -514,16 +431,15 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
- iommu_range_free(iommu, bus_addr, npages);
-
iommu_free_ctx(iommu, ctx);
-
spin_unlock_irqrestore(&iommu->lock, flags);
+
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
@@ -540,7 +456,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
- return 0;
+ return -EINVAL;
spin_lock_irqsave(&iommu->lock, flags);
@@ -564,9 +480,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
- seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
+ base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
@@ -580,10 +495,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
@@ -593,7 +509,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
+ dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
@@ -638,7 +554,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
@@ -653,16 +568,17 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
- entry = (vaddr - iommu->page_table_map_base)
+ entry = (vaddr - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
- s->dma_address = DMA_ERROR_CODE;
+ iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ IOMMU_ERROR_CODE);
+
s->dma_length = 0;
}
if (s == outs)
@@ -670,7 +586,7 @@ iommu_map_failed:
}
spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
+ return -EINVAL;
}
/* If contexts are being used, they are the same in all of the mappings
@@ -683,10 +599,11 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
+ struct iommu_map_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
@@ -695,7 +612,7 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
@@ -722,20 +639,21 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
- entry = ((dma_handle - iommu->page_table_map_base)
+ entry = ((dma_handle - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
- if (strbuf->strbuf_enabled)
+ if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
+ iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
@@ -769,9 +687,10 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
@@ -804,9 +723,10 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
- iopte = iommu->page_table +
- ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ iopte = iommu->page_table + ((sglist[0].dma_address -
+ tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
@@ -826,38 +746,29 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static struct dma_map_ops sun4u_dma_ops = {
+static int dma_4u_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+
+ if (ali_sound_dma_hack(dev, device_mask))
+ return 1;
+
+ if (device_mask < iommu->dma_addr_mask)
+ return 0;
+ return 1;
+}
+
+static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
- .map_page = dma_4u_map_page,
- .unmap_page = dma_4u_unmap_page,
+ .map_phys = dma_4u_map_phys,
+ .unmap_phys = dma_4u_unmap_phys,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ .dma_supported = dma_4u_supported,
};
-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
-
-int dma_supported(struct device *dev, u64 device_mask)
-{
- struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask = iommu->dma_addr_mask;
-
- if (device_mask >= (1UL << 32UL))
- return 0;
-
- if ((device_mask & dma_addr_mask) == dma_addr_mask)
- return 1;
-
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 591f5879039c..d62ed9c5682d 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
*
* Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net)
@@ -15,7 +16,6 @@
#include <linux/iommu-helper.h>
#include <asm/iommu.h>
-#include <asm/scatterlist.h>
/*
* These give mapping size of each iommu pte/tlb.
@@ -48,12 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
-extern unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle);
-extern void iommu_range_free(struct iommu *iommu,
- dma_addr_t dma_addr,
- unsigned long npages);
-
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468de9b2..5ebca5c7af1e 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ioport.c: Simple io mapping allocator.
*
@@ -37,7 +38,8 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
-#include <linux/of_device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
@@ -50,19 +52,6 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-const struct sparc32_dma_ops *sparc32_dma_ops;
-
-/* This function must make sure that caches and memory are coherent after DMA
- * On LEON systems without cache snooping it flushes the entire D-CACHE.
- */
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
- if (sparc_cpu_model == sparc_leon) {
- if (!sparc_leon3_snooping_enabled())
- leon_flush_dcache_all();
- }
-}
-
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name);
@@ -121,17 +110,17 @@ static void xres_free(struct xresource *xrp) {
*
* Bus type is always zero on IIep.
*/
-void __iomem *ioremap(unsigned long offset, unsigned long size)
+void __iomem *ioremap(phys_addr_t offset, size_t size)
{
char name[14];
sprintf(name, "phys_%08x", (u32)offset);
- return _sparc_alloc_io(0, offset, size, name);
+ return _sparc_alloc_io(0, (unsigned long)offset, size, name);
}
EXPORT_SYMBOL(ioremap);
/*
- * Comlimentary to ioremap().
+ * Complementary to ioremap().
*/
void iounmap(volatile void __iomem *virtual)
{
@@ -186,7 +175,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
if (name == NULL) name = "???";
- if ((xres = xres_alloc()) != 0) {
+ if ((xres = xres_alloc()) != NULL) {
tack = xres->xname;
res = &xres->xres;
} else {
@@ -202,7 +191,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
tack += sizeof (struct resource);
}
- strlcpy(tack, name, XNMLN+1);
+ strscpy(tack, name, XNMLN+1);
res->name = tack;
va = _sparc_ioremap(res, busno, phys, size);
@@ -233,7 +222,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
}
/*
- * Comlimentary to _sparc_ioremap().
+ * Complementary to _sparc_ioremap().
*/
static void _sparc_free_io(struct resource *res)
{
@@ -245,171 +234,60 @@ static void _sparc_free_io(struct resource *res)
release_resource(res);
}
-#ifdef CONFIG_SBUS
-
-void sbus_set_sbus64(struct device *dev, int x)
+unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
{
- printk("sbus_set_sbus64: unsupported\n");
-}
-EXPORT_SYMBOL(sbus_set_sbus64);
-
-/*
- * Allocate a chunk of memory suitable for DMA.
- * Typically devices use them for control blocks.
- * CPU may access them without any explicit flushing.
- */
-static void *sbus_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
-{
- struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = PAGE_ALIGN(len);
- unsigned long va;
struct resource *res;
- int order;
- /* XXX why are some lengths signed, others unsigned? */
- if (len <= 0) {
- return NULL;
- }
- /* XXX So what is maxphys for us and how do drivers know it? */
- if (len > 256*1024) { /* __get_free_pages() limit */
- return NULL;
- }
-
- order = get_order(len_total);
- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
- goto err_nopages;
-
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
- goto err_nomem;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return 0;
+ res->name = dev->of_node->full_name;
- if (allocate_resource(&_sparc_dvma, res, len_total,
- _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
- goto err_nova;
+ if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
+ _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
+ printk("%s: cannot occupy 0x%zx", __func__, len);
+ kfree(res);
+ return 0;
}
- // XXX The sbus_map_dma_area does this for us below, see comments.
- // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
- /*
- * XXX That's where sdev would be used. Currently we load
- * all iommu tables with the same translations.
- */
- if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
- goto err_noiommu;
-
- res->name = op->dev.of_node->name;
-
- return (void *)(unsigned long)res->start;
-
-err_noiommu:
- release_resource(res);
-err_nova:
- kfree(res);
-err_nomem:
- free_pages(va, order);
-err_nopages:
- return NULL;
+ return res->start;
}
-static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
+bool sparc_dma_free_resource(void *cpu_addr, size_t size)
{
+ unsigned long addr = (unsigned long)cpu_addr;
struct resource *res;
- struct page *pgv;
- if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)p)) == NULL) {
- printk("sbus_free_consistent: cannot free %p\n", p);
- return;
+ res = lookup_resource(&_sparc_dvma, addr);
+ if (!res) {
+ printk("%s: cannot free %p\n", __func__, cpu_addr);
+ return false;
}
- if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
- printk("sbus_free_consistent: unaligned va %p\n", p);
- return;
+ if ((addr & (PAGE_SIZE - 1)) != 0) {
+ printk("%s: unaligned va %p\n", __func__, cpu_addr);
+ return false;
}
- n = PAGE_ALIGN(n);
- if (resource_size(res) != n) {
- printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
- (long)resource_size(res), n);
- return;
+ size = PAGE_ALIGN(size);
+ if (resource_size(res) != size) {
+ printk("%s: region 0x%lx asked 0x%zx\n",
+ __func__, (long)resource_size(res), size);
+ return false;
}
release_resource(res);
kfree(res);
-
- pgv = virt_to_page(p);
- sbus_unmap_dma_area(dev, ba, n);
-
- __free_pages(pgv, get_order(n));
+ return true;
}
-/*
- * Map a chunk of memory so that devices can see it.
- * CPU view of this memory may be inconsistent with
- * a device view and explicit flushing is necessary.
- */
-static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t len,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- void *va = page_address(page) + offset;
-
- /* XXX why are some lengths signed, others unsigned? */
- if (len <= 0) {
- return 0;
- }
- /* XXX So what is maxphys for us and how do drivers know it? */
- if (len > 256*1024) { /* __get_free_pages() limit */
- return 0;
- }
- return mmu_get_scsi_one(dev, va, len);
-}
-
-static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- mmu_release_scsi_one(dev, ba, n);
-}
-
-static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- mmu_get_scsi_sgl(dev, sg, n);
- return n;
-}
-
-static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- mmu_release_scsi_sgl(dev, sg, n);
-}
-
-static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int n, enum dma_data_direction dir)
-{
- BUG();
-}
+#ifdef CONFIG_SBUS
-static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int n, enum dma_data_direction dir)
+void sbus_set_sbus64(struct device *dev, int x)
{
- BUG();
+ printk("sbus_set_sbus64: unsupported\n");
}
-
-struct dma_map_ops sbus_dma_ops = {
- .alloc = sbus_alloc_coherent,
- .free = sbus_free_coherent,
- .map_page = sbus_map_page,
- .unmap_page = sbus_unmap_page,
- .map_sg = sbus_map_sg,
- .unmap_sg = sbus_unmap_sg,
- .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
- .sync_sg_for_device = sbus_sync_sg_for_device,
-};
+EXPORT_SYMBOL(sbus_set_sbus64);
static int __init sparc_register_ioport(void)
{
@@ -422,258 +300,21 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-static void *pci32_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *pba, gfp_t gfp,
- struct dma_attrs *attrs)
-{
- unsigned long len_total = PAGE_ALIGN(len);
- void *va;
- struct resource *res;
- int order;
-
- if (len == 0) {
- return NULL;
- }
- if (len > 256*1024) { /* __get_free_pages() limit */
- return NULL;
- }
-
- order = get_order(len_total);
- va = (void *) __get_free_pages(GFP_KERNEL, order);
- if (va == NULL) {
- printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
- goto err_nopages;
- }
-
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk("pci_alloc_consistent: no core\n");
- goto err_nomem;
- }
-
- if (allocate_resource(&_sparc_dvma, res, len_total,
- _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
- goto err_nova;
- }
- srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
-
- *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
- return (void *) res->start;
-
-err_nova:
- kfree(res);
-err_nomem:
- free_pages((unsigned long)va, order);
-err_nopages:
- return NULL;
-}
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
-{
- struct resource *res;
-
- if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)p)) == NULL) {
- printk("pci_free_consistent: cannot free %p\n", p);
- return;
- }
-
- if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
- printk("pci_free_consistent: unaligned va %p\n", p);
- return;
- }
-
- n = PAGE_ALIGN(n);
- if (resource_size(res) != n) {
- printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
- (long)resource_size(res), (long)n);
- return;
- }
-
- dma_make_coherent(ba, n);
- srmmu_unmapiorange((unsigned long)p, n);
-
- release_resource(res);
- kfree(res);
- free_pages((unsigned long)phys_to_virt(ba), get_order(n));
-}
-
/*
- * Same as pci_map_single, but with pages.
- */
-static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- /* IIep is write-through, not flushing. */
- return page_to_phys(page) + offset;
-}
-
-static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- if (dir != PCI_DMA_TODEVICE)
- dma_make_coherent(ba, PAGE_ALIGN(size));
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scather-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
+ * IIep is write-through, not flushing on cpu to device transfer.
*
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct scatterlist *sg;
- int n;
-
- /* IIep is write-through, not flushing. */
- for_each_sg(sgl, sg, nents, n) {
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
+ * On LEON systems without cache snooping, the entire D-CACHE must be flushed to
+ * make DMA to cacheable memory coherent.
*/
-static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
+ if (dir != DMA_TO_DEVICE &&
+ sparc_cpu_model == sparc_leon &&
+ !sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
}
-/* Make physical memory consistent for a single
- * streaming mode DMA translation before or after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
-}
-
-static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
-
-static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
-
-struct dma_map_ops pci32_dma_ops = {
- .alloc = pci32_alloc_coherent,
- .free = pci32_free_coherent,
- .map_page = pci32_map_page,
- .unmap_page = pci32_unmap_page,
- .map_sg = pci32_map_sg,
- .unmap_sg = pci32_unmap_sg,
- .sync_single_for_cpu = pci32_sync_single_for_cpu,
- .sync_single_for_device = pci32_sync_single_for_device,
- .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
- .sync_sg_for_device = pci32_sync_sg_for_device,
-};
-EXPORT_SYMBOL(pci32_dma_ops);
-
-/* leon re-uses pci32_dma_ops */
-struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
-EXPORT_SYMBOL(leon_dma_ops);
-
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
-
-/*
- * Return whether the given PCI device DMA address mask can be
- * supported properly. For example, if your device can only drive the
- * low 24-bits during PCI bus mastering, then you would pass
- * 0x00ffffff as the mask to this function.
- */
-int dma_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return 1;
-#endif
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
@@ -682,7 +323,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
const char *nm;
for (r = root->child; r != NULL; r = r->sibling) {
- if ((nm = r->name) == 0) nm = "???";
+ if ((nm = r->name) == NULL) nm = "???";
seq_printf(m, "%016llx-%016llx: %s\n",
(unsigned long long)r->start,
(unsigned long long)r->end, nm);
@@ -690,25 +331,14 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
return 0;
}
-
-static int sparc_io_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sparc_io_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations sparc_io_proc_fops = {
- .owner = THIS_MODULE,
- .open = sparc_io_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
#endif /* CONFIG_PROC_FS */
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
- proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
- proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
+ proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show,
+ &sparc_iomap);
+ proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show,
+ &_sparc_dvma);
#endif
}
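Two helpers replace the hand-rolled sbus and pci32 dma_map_ops that this hunk removes: sparc_dma_alloc_resource() and sparc_dma_free_resource() factor out the DVMA window bookkeeping against _sparc_dvma, while arch_sync_dma_for_cpu() provides the LEON cache flush hook for the generic noncoherent DMA path. A rough sketch of how a sparc32 DMA backend might pair the two resource helpers follows; the dvma_* wrappers are illustrative names, not functions from this patch.

/* Sketch: only sparc_dma_alloc_resource() and sparc_dma_free_resource()
 * come from ioport.c above; the wrappers are illustrative.
 */
static void *dvma_reserve(struct device *dev, size_t len)
{
	unsigned long va = sparc_dma_alloc_resource(dev, PAGE_ALIGN(len));

	return va ? (void *)va : NULL;	/* 0 means no room left in _sparc_dvma */
}

static void dvma_release(void *cpu_addr, size_t len)
{
	/* Returns false for unknown, unaligned or wrongly sized regions. */
	if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(len)))
		pr_warn("dvma: failed to release %p\n", cpu_addr);
}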
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index b66b6aad1d6d..b02026ad6e34 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/platform_device.h>
#include <asm/cpu_type.h>
@@ -82,11 +83,20 @@ void handler_irq(unsigned int pil, struct pt_regs *regs);
unsigned long leon_get_irqmask(unsigned int irq);
+/* irq_32.c */
+void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs);
+
+/* sun4m_irq.c */
+void sun4m_nmi(struct pt_regs *regs);
+
+/* sun4d_irq.c */
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs);
+
#ifdef CONFIG_SMP
/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
#define SUN4D_IPI_IRQ 13
-extern void sun4d_ipi_interrupt(void);
+void sun4d_ipi_interrupt(void);
#endif
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index c145f6fd123b..5210991429d5 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Interrupt request handling routines. On the
* Sparc the IRQs are basically 'cast in stone'
@@ -17,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cpudata.h>
+#include <asm/setup.h>
#include <asm/pcic.h>
#include <asm/leon.h>
@@ -164,7 +166,7 @@ void irq_link(unsigned int irq)
p = &irq_table[irq];
pil = p->pil;
- BUG_ON(pil > SUN4D_MAX_IRQ);
+ BUG_ON(pil >= SUN4D_MAX_IRQ);
p->next = irq_map[pil];
irq_map[pil] = p;
@@ -181,7 +183,7 @@ void irq_unlink(unsigned int irq)
spin_lock_irqsave(&irq_map_lock, flags);
p = &irq_table[irq];
- BUG_ON(p->pil > SUN4D_MAX_IRQ);
+ BUG_ON(p->pil >= SUN4D_MAX_IRQ);
pnext = &irq_map[p->pil];
while (*pnext != p)
pnext = &(*pnext)->next;
@@ -197,18 +199,18 @@ int arch_show_interrupts(struct seq_file *p, int prec)
int j;
#ifdef CONFIG_SMP
- seq_printf(p, "RES: ");
+ seq_printf(p, "RES:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_resched_count, 10);
seq_printf(p, " IPI rescheduling interrupts\n");
- seq_printf(p, "CAL: ");
+ seq_printf(p, "CAL:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_call_count, 10);
seq_printf(p, " IPI function call interrupts\n");
#endif
- seq_printf(p, "NMI: ");
+ seq_printf(p, "NMI:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).counter);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).counter, 10);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
@@ -266,11 +268,11 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
if (sparc_cpu_model != sparc_leon) {
struct tt_entry *trap_table;
- trap_table = &trapbase_cpu1;
+ trap_table = &trapbase_cpu1[0];
INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2;
+ trap_table = &trapbase_cpu2[0];
INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3;
+ trap_table = &trapbase_cpu3[0];
INSTANTIATE(trap_table)
}
#endif
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 9bcbbe2c4e7e..ded463c82abd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -21,7 +22,7 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
-#include <linux/kmemleak.h>
+#include <linux/string_choices.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -35,20 +36,19 @@
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
+#include <asm/softirq_stack.h>
#include "entry.h"
#include "cpumap.h"
#include "kstack.h"
-#define NUM_IVECS (IMAP_INR + 1)
-
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
@@ -107,55 +107,194 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
-static struct {
- unsigned int dev_handle;
- unsigned int dev_ino;
- unsigned int in_use;
-} irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(irq_alloc_lock);
+static unsigned long hvirq_major __initdata;
+static int __init early_hvirq_major(char *p)
+{
+ int rc = kstrtoul(p, 10, &hvirq_major);
+
+ return rc;
+}
+early_param("hvirq", early_hvirq_major);
+
+static int hv_irq_version;
+
+/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
+ * based interfaces, but:
+ *
+ * 1) Several OSs, Solaris and Linux included, use them even when only
+ * negotiating version 1.0 (or failing to negotiate at all). So the
+ * hypervisor has a workaround that provides the VIRQ interfaces even
+ * when only version 1.0 of the API is in use.
+ *
+ * 2) Second, and more importantly, with major version 2.0 these VIRQ
+ * interfaces only were actually hooked up for LDC interrupts, even
+ * though the Hypervisor specification clearly stated:
+ *
+ * The new interrupt API functions will be available to a guest
+ * when it negotiates version 2.0 in the interrupt API group 0x2. When
+ * a guest negotiates version 2.0, all interrupt sources will only
+ * support using the cookie interface, and any attempt to use the
+ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
+ * ENOTSUPPORTED error being returned.
+ *
+ * with an emphasis on "all interrupt sources".
+ *
+ * To correct this, major version 3.0 was created which does actually
+ * support VIRQs for all interrupt sources (not just LDC devices). So
+ * if we want to move completely over the cookie based VIRQs we must
+ * negotiate major version 3.0 or later of HV_GRP_INTR.
+ */
+static bool sun4v_cookie_only_virqs(void)
+{
+ return hv_irq_version >= 3;
+}
-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+static void __init irq_init_hv(void)
{
- unsigned long flags;
- unsigned char ent;
+ unsigned long hv_error, major, minor = 0;
- BUILD_BUG_ON(NR_IRQS >= 256);
+ if (tlb_type != hypervisor)
+ return;
+
+ if (hvirq_major)
+ major = hvirq_major;
+ else
+ major = 3;
+
+ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
+ if (!hv_error)
+ hv_irq_version = major;
+ else
+ hv_irq_version = 1;
+
+ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
+ hv_irq_version,
+ str_enabled_disabled(sun4v_cookie_only_virqs()));
+}
+
+/* This function is for the timer interrupt.*/
+int __init arch_probe_nr_irqs(void)
+{
+ return 1;
+}
- spin_lock_irqsave(&irq_alloc_lock, flags);
+#define DEFAULT_NUM_IVECS (0xfffU)
+static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
+#define NUM_IVECS (nr_ivec)
- for (ent = 1; ent < NR_IRQS; ent++) {
- if (!irq_table[ent].in_use)
+static unsigned int __init size_nr_ivec(void)
+{
+ if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ /* Athena's devhandle|devino is large.*/
+ case SUN4V_CHIP_SPARC64X:
+ nr_ivec = 0xffff;
break;
+ }
}
- if (ent >= NR_IRQS) {
- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
- ent = 0;
- } else {
- irq_table[ent].dev_handle = dev_handle;
- irq_table[ent].dev_ino = dev_ino;
- irq_table[ent].in_use = 1;
- }
+ return nr_ivec;
+}
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
+struct irq_handler_data {
+ union {
+ struct {
+ unsigned int dev_handle;
+ unsigned int dev_ino;
+ };
+ unsigned long sysino;
+ };
+ struct ino_bucket bucket;
+ unsigned long iclr;
+ unsigned long imap;
+};
+
+static inline unsigned int irq_data_to_handle(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
+
+ return ihd->dev_handle;
+}
+
+static inline unsigned int irq_data_to_ino(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
+
+ return ihd->dev_ino;
+}
+
+static inline unsigned long irq_data_to_sysino(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
- return ent;
+ return ihd->sysino;
}
-#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
- unsigned long flags;
+ void *data = irq_get_handler_data(irq);
- if (irq >= NR_IRQS)
- return;
+ kfree(data);
+ irq_set_handler_data(irq, NULL);
+ irq_free_descs(irq, 1);
+}
- spin_lock_irqsave(&irq_alloc_lock, flags);
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+{
+ int irq;
- irq_table[irq].in_use = 0;
+ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
+ if (irq <= 0)
+ goto out;
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
+ return irq;
+out:
+ return 0;
+}
+
+static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
+{
+ unsigned long hv_err, cookie;
+ struct ino_bucket *bucket;
+ unsigned int irq = 0U;
+
+ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
+ if (hv_err) {
+ pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
+ goto out;
+ }
+
+ if (cookie & ((1UL << 63UL))) {
+ cookie = ~cookie;
+ bucket = (struct ino_bucket *) __va(cookie);
+ irq = bucket->__irq;
+ }
+out:
+ return irq;
+}
+
+static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
+{
+ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+ struct ino_bucket *bucket;
+ unsigned int irq;
+
+ bucket = &ivector_table[sysino];
+ irq = bucket_get_irq(__pa(bucket));
+
+ return irq;
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+ pr_crit("BAD IRQ ack %d\n", irq);
+}
+
+void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+ void *arg1, void *arg2)
+{
+ pr_warn("IRQ pre handler NOT supported.\n");
}
-#endif
/*
* /proc/interrupts printing:
@@ -164,9 +303,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
- seq_printf(p, "NMI: ");
+ seq_printf(p, "NMI:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).__nmi_count, 10);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
@@ -206,29 +345,16 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
return tid;
}
-struct irq_handler_data {
- unsigned long iclr;
- unsigned long imap;
-
- void (*pre_handler)(unsigned int, void *, void *);
- void *arg1;
- void *arg2;
-};
-
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
- cpumask_t mask;
int cpuid;
- cpumask_copy(&mask, affinity);
- if (cpumask_equal(&mask, cpu_online_mask)) {
+ if (cpumask_equal(affinity, cpu_online_mask)) {
cpuid = map_to_cpu(irq);
} else {
- cpumask_t tmp;
-
- cpumask_and(&tmp, cpu_online_mask, &mask);
- cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
+ cpuid = cpumask_first_and(affinity, cpu_online_mask);
+ cpuid = cpuid < nr_cpu_ids ? cpuid : map_to_cpu(irq);
}
return cpuid;
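/*
 * Illustration (not part of the patch): a minimal user-space sketch of the
 * CPU selection logic above, assuming plain 64-bit bitmasks stand in for
 * struct cpumask and a fixed fallback CPU stands in for map_to_cpu().
 */
#include <stdio.h>
#include <stdint.h>

static int pick_cpu(uint64_t affinity, uint64_t online, int fallback_cpu)
{
	uint64_t both = affinity & online;

	if (affinity == online)		/* "any CPU" affinity: use the fallback mapping */
		return fallback_cpu;
	if (both)			/* first CPU present in both masks */
		return __builtin_ctzll(both);
	return fallback_cpu;		/* affinity excludes every online CPU */
}

int main(void)
{
	/* CPUs 2 and 5 online, affinity asks for CPUs 4-5 -> choose CPU 5 */
	printf("%d\n", pick_cpu(0x30, 0x24, 2));
	return 0;
}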
@@ -240,13 +366,15 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
static void sun4u_irq_enable(struct irq_data *data)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(data->irq, data->affinity);
+ cpuid = irq_choose_cpu(data->irq,
+ irq_data_get_affinity_mask(data));
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -263,8 +391,9 @@ static void sun4u_irq_enable(struct irq_data *data)
static int sun4u_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
@@ -308,16 +437,18 @@ static void sun4u_irq_disable(struct irq_data *data)
static void sun4u_irq_eoi(struct irq_data *data)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data))
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+ unsigned long cpuid = irq_choose_cpu(data->irq,
+ irq_data_get_affinity_mask(data));
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -337,8 +468,8 @@ static void sun4v_irq_enable(struct irq_data *data)
static int sun4v_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(data->irq, mask);
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -351,7 +482,7 @@ static int sun4v_set_affinity(struct irq_data *data,
static void sun4v_irq_disable(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -362,7 +493,7 @@ static void sun4v_irq_disable(struct irq_data *data)
static void sun4v_irq_eoi(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
@@ -373,13 +504,12 @@ static void sun4v_irq_eoi(struct irq_data *data)
static void sun4v_virq_enable(struct irq_data *data)
{
- unsigned long cpuid, dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
+ unsigned long cpuid;
int err;
- cpuid = irq_choose_cpu(data->irq, data->affinity);
-
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
+ cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -403,14 +533,13 @@ static void sun4v_virq_enable(struct irq_data *data)
static int sun4v_virt_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- unsigned long cpuid, dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
+ unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(data->irq, mask);
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
-
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
@@ -422,11 +551,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
static void sun4v_virq_disable(struct irq_data *data)
{
- unsigned long dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
int err;
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
@@ -438,12 +566,10 @@ static void sun4v_virq_disable(struct irq_data *data)
static void sun4v_virq_eoi(struct irq_data *data)
{
- unsigned long dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
int err;
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
-
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
@@ -479,31 +605,10 @@ static struct irq_chip sun4v_virq = {
.flags = IRQCHIP_EOI_IF_HANDLED,
};
-static void pre_flow_handler(struct irq_data *d)
-{
- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
- unsigned int ino = irq_table[d->irq].dev_ino;
-
- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
-}
-
-void irq_install_pre_handler(int irq,
- void (*func)(unsigned int, void *, void *),
- void *arg1, void *arg2)
-{
- struct irq_handler_data *handler_data = irq_get_handler_data(irq);
-
- handler_data->pre_handler = func;
- handler_data->arg1 = arg1;
- handler_data->arg2 = arg2;
-
- __irq_set_preflow_handler(irq, pre_flow_handler);
-}
-
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
- struct ino_bucket *bucket;
struct irq_handler_data *handler_data;
+ struct ino_bucket *bucket;
unsigned int irq;
int ino;
@@ -537,119 +642,166 @@ out:
return irq;
}
-static unsigned int sun4v_build_common(unsigned long sysino,
- struct irq_chip *chip)
+static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
+ void (*handler_data_init)(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino),
+ struct irq_chip *chip)
{
- struct ino_bucket *bucket;
- struct irq_handler_data *handler_data;
+ struct irq_handler_data *data;
unsigned int irq;
- BUG_ON(tlb_type != hypervisor);
+ irq = irq_alloc(devhandle, devino);
+ if (!irq)
+ goto out;
- bucket = &ivector_table[sysino];
- irq = bucket_get_irq(__pa(bucket));
- if (!irq) {
- irq = irq_alloc(0, sysino);
- bucket_set_irq(__pa(bucket), irq);
- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
- "IVEC");
+ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!data)) {
+ pr_err("IRQ handler data allocation failed.\n");
+ irq_free(irq);
+ irq = 0;
+ goto out;
}
- handler_data = irq_get_handler_data(irq);
- if (unlikely(handler_data))
- goto out;
+ irq_set_handler_data(irq, data);
+ handler_data_init(data, devhandle, devino);
+ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
+ data->imap = ~0UL;
+ data->iclr = ~0UL;
+out:
+ return irq;
+}
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!handler_data)) {
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
- prom_halt();
- }
- irq_set_handler_data(irq, handler_data);
+static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
+ unsigned int devino)
+{
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
+ unsigned long hv_error, cookie;
- /* Catch accidental accesses to these things. IMAP/ICLR handling
- * is done by hypervisor calls on sun4v platforms, not by direct
- * register accesses.
+ /* handler_irq needs to find the irq. The cookie is negated so it is
+ * seen as signed in sun4v_dev_mondo and treated as a non-ivector_table
+ * delivery.
*/
- handler_data->imap = ~0UL;
- handler_data->iclr = ~0UL;
+ ihd->bucket.__irq = irq;
+ cookie = ~__pa(&ihd->bucket);
-out:
- return irq;
+ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+ if (hv_error)
+ pr_err("HV vintr set cookie failed = %ld\n", hv_error);
+
+ return hv_error;
}
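/*
 * Illustration (not part of the patch): why the negated bucket address works
 * as a cookie. Kernel physical addresses sit far below 2^63, so ~__pa(bucket)
 * always has bit 63 set; the mondo path uses that bit to tell a cookie
 * delivery apart from a plain sysino, then recovers the bucket with another
 * bitwise NOT (as cookie_exists() does). The physical address below is made
 * up for the sketch.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t bucket_pa = 0x000000427f3c1040ULL;	/* hypothetical __pa(&bucket) */
	uint64_t cookie = ~bucket_pa;			/* what cookie_assign() hands to the HV */

	assert(cookie & (1ULL << 63));			/* the cookie_exists() test */
	assert(~cookie == bucket_pa);			/* and how the bucket comes back */
	printf("cookie=0x%016llx -> pa=0x%016llx\n",
	       (unsigned long long)cookie, (unsigned long long)~cookie);
	return 0;
}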
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+static void cookie_handler_data(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino)
{
- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+ data->dev_handle = devhandle;
+ data->dev_ino = devino;
+}
+
+static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
+ struct irq_chip *chip)
+{
+ unsigned long hv_error;
+ unsigned int irq;
+
+ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
+
+ hv_error = cookie_assign(irq, devhandle, devino);
+ if (hv_error) {
+ irq_free(irq);
+ irq = 0;
+ }
- return sun4v_build_common(sysino, &sun4v_irq);
+ return irq;
}
-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
- struct irq_handler_data *handler_data;
- unsigned long hv_err, cookie;
- struct ino_bucket *bucket;
unsigned int irq;
- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
- if (unlikely(!bucket))
- return 0;
+ irq = cookie_exists(devhandle, devino);
+ if (irq)
+ goto out;
- /* The only reference we store to the IRQ bucket is
- * by physical address which kmemleak can't see, tell
- * it that this object explicitly is not a leak and
- * should be scanned.
- */
- kmemleak_not_leak(bucket);
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
- __flush_dcache_range((unsigned long) bucket,
- ((unsigned long) bucket +
- sizeof(struct ino_bucket)));
+out:
+ return irq;
+}
- irq = irq_alloc(devhandle, devino);
+static void sysino_set_bucket(unsigned int irq)
+{
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
+ struct ino_bucket *bucket;
+ unsigned long sysino;
+
+ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
+ BUG_ON(sysino >= nr_ivec);
+ bucket = &ivector_table[sysino];
bucket_set_irq(__pa(bucket), irq);
+}
- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
- "IVEC");
+static void sysino_handler_data(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino)
+{
+ unsigned long sysino;
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!handler_data))
- return 0;
+ sysino = sun4v_devino_to_sysino(devhandle, devino);
+ data->sysino = sysino;
+}
- /* In order to make the LDC channel startup sequence easier,
- * especially wrt. locking, we do not let request_irq() enable
- * the interrupt.
- */
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- irq_set_handler_data(irq, handler_data);
+static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
+ struct irq_chip *chip)
+{
+ unsigned int irq;
- /* Catch accidental accesses to these things. IMAP/ICLR handling
- * is done by hypervisor calls on sun4v platforms, not by direct
- * register accesses.
- */
- handler_data->imap = ~0UL;
- handler_data->iclr = ~0UL;
+ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
+ if (!irq)
+ goto out;
- cookie = ~__pa(bucket);
- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
- if (hv_err) {
- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
- "err=%lu\n", devhandle, devino, hv_err);
- prom_halt();
- }
+ sysino_set_bucket(irq);
+out:
+ return irq;
+}
+
+static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
+{
+ int irq;
+ irq = sysino_exists(devhandle, devino);
+ if (irq)
+ goto out;
+
+ irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
+out:
return irq;
}
-void ack_bad_irq(unsigned int irq)
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
- unsigned int ino = irq_table[irq].dev_ino;
+ unsigned int irq;
- if (!ino)
- ino = 0xdeadbeef;
+ if (sun4v_cookie_only_virqs())
+ irq = sun4v_build_cookie(devhandle, devino);
+ else
+ irq = sun4v_build_sysino(devhandle, devino);
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
- ino, irq);
+ return irq;
+}
+
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+{
+ int irq;
+
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+ if (!irq)
+ goto out;
+
+ /* As in the original sun4v_build_virq(), do not let request_irq()
+ * auto-enable the interrupt; this eases the LDC startup sequence.
+ */
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+out:
+ return irq;
}
void *hardirq_stack[NR_CPUS];
@@ -698,31 +850,22 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-void do_softirq(void)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void)
{
- unsigned long flags;
-
- if (in_interrupt())
- return;
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
- local_irq_save(flags);
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
- if (local_softirq_pending()) {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-
- sp += THREAD_SIZE - 192 - STACK_BIAS;
-
- __asm__ __volatile__("mov %%sp, %0\n\t"
- "mov %1, %%sp"
- : "=&r" (orig_sp)
- : "r" (sp));
- __do_softirq();
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
-
- local_irq_restore(flags);
+ __asm__ __volatile__("mov %%sp, %0\n\t"
+ "mov %1, %%sp"
+ : "=&r" (orig_sp)
+ : "r" (sp));
+ __do_softirq();
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
}
+#endif
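/*
 * Illustration (not part of the patch): the %sp value loaded above. On sparc64
 * the saved stack pointer is biased by STACK_BIAS (2047), so the register holds
 * (real address - 2047), and the code leaves a 192-byte minimal frame at the top
 * of the per-CPU softirq stack. The THREAD_SIZE and base address below are
 * assumptions for the sketch (8K pages, a two-page thread stack).
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE	(2 * 8192UL)
#define STACK_BIAS	2047UL

int main(void)
{
	uint64_t stack_base = 0xfffff80012340000ULL;	/* hypothetical softirq_stack[cpu] */
	uint64_t sp = stack_base + THREAD_SIZE - 192 - STACK_BIAS;

	printf("biased %%sp = 0x%llx (real frame top = 0x%llx)\n",
	       (unsigned long long)sp, (unsigned long long)(sp + STACK_BIAS));
	return 0;
}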
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
@@ -731,15 +874,18 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_desc *desc = irq_to_desc(irq);
- struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct irq_data *data;
unsigned long flags;
+ if (!desc)
+ continue;
+ data = irq_desc_get_irq_data(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && !irqd_is_per_cpu(data)) {
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data,
- data->affinity,
- false);
+ irq_data_get_affinity_mask(data),
+ false);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -767,7 +913,7 @@ static void map_prom_timers(void)
dp = of_find_node_by_path("/");
dp = dp->child;
while (dp) {
- if (!strcmp(dp->name, "counter-timer"))
+ if (of_node_name_eq(dp, "counter-timer"))
break;
dp = dp->sibling;
}
@@ -829,13 +975,14 @@ void notrace init_irqwork_curcpu(void)
*
* On SMP this gets invoked from the CPU trampoline before
* the cpu has fully taken over the trap table from OBP,
- * and it's kernel stack + %g6 thread register state is
+ * and its kernel stack + %g6 thread register state is
* not fully cooked yet.
*
* Therefore you cannot make any OBP calls, not even prom_printf,
* from these two routines.
*/
-static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
+ unsigned long qmask)
{
unsigned long num_entries = (qmask + 1) / 64;
unsigned long status;
@@ -848,7 +995,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l
}
}
-void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
+void notrace sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
@@ -872,7 +1019,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
unsigned long order = get_order(size);
unsigned long p;
- p = __get_free_pages(GFP_KERNEL, order);
+ p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
@@ -885,17 +1032,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+ /* Make sure the mondo block is 64-byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
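/*
 * Illustration (not part of the patch): why kzalloc(127) is enough for a
 * 64-byte-aligned, 64-byte mondo block. Rounding the returned pointer up to
 * the next 64-byte boundary moves it forward by at most 63 bytes, so the
 * aligned block always fits inside the 127-byte allocation. Plain malloc()
 * stands in for kzalloc() here.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

int main(void)
{
	unsigned char *p = malloc(127);
	uintptr_t mondo;

	assert(p);
	mondo = ((uintptr_t)p + 63) & ~(uintptr_t)0x3f;	/* same rounding as the patch */
	assert((mondo & 0x3f) == 0);			/* 64-byte aligned */
	assert(mondo + 64 <= (uintptr_t)p + 127);	/* still inside the allocation */
	printf("p=%p mondo=0x%lx\n", (void *)p, (unsigned long)mondo);
	free(p);
	return 0;
}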
@@ -932,16 +1088,22 @@ static struct irqaction timer_irq_action = {
.name = "timer",
};
-/* Only invoked on boot processor. */
-void __init init_IRQ(void)
+static void __init irq_ivector_init(void)
{
- unsigned long size;
+ unsigned long size, order;
+ unsigned int ivecs;
- map_prom_timers();
- kill_prom_timer();
+ /* If we are doing cookie-only VIRQs then we do not need the ivector
+ * table to process interrupts.
+ */
+ if (sun4v_cookie_only_virqs())
+ return;
- size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = kzalloc(size, GFP_KERNEL);
+ ivecs = size_nr_ivec();
+ size = sizeof(struct ino_bucket) * ivecs;
+ order = get_order(size);
+ ivector_table = (struct ino_bucket *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
@@ -950,6 +1112,15 @@ void __init init_IRQ(void)
((unsigned long) ivector_table) + size);
ivector_table_pa = __pa(ivector_table);
+}
+
+/* Only invoked on boot processor. */
+void __init init_IRQ(void)
+{
+ irq_init_hv();
+ irq_ivector_init();
+ map_prom_timers();
+ kill_prom_timer();
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
diff --git a/arch/sparc/kernel/itlb_miss.S b/arch/sparc/kernel/itlb_miss.S
index 5a8377b54955..5a5d92482e8d 100644
--- a/arch/sparc/kernel/itlb_miss.S
+++ b/arch/sparc/kernel/itlb_miss.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ITLB ** ICACHE line 1: Context 0 check and TSB load */
ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET
diff --git a/arch/sparc/kernel/ivec.S b/arch/sparc/kernel/ivec.S
index d29f92ebca5e..94ba2c3a29c1 100644
--- a/arch/sparc/kernel/ivec.S
+++ b/arch/sparc/kernel/ivec.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c11e82a..a4cfaeecaf5e 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -8,34 +9,39 @@
#include <asm/cacheflush.h>
-#ifdef HAVE_JUMP_LABEL
-
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
+ u32 val;
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
+ bool use_v9_branch = false;
+
+ BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
+ if (off <= 0xfffff && off >= -0x100000)
+ use_v9_branch = true;
#endif
+ if (use_v9_branch) {
+ /* WDISP19 - target is . + immed << 2 */
+ /* ba,pt %xcc, . + off */
+ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
+ } else {
+ /* WDISP22 - target is . + immed << 2 */
+ BUG_ON(off > 0x7fffff);
+ BUG_ON(off < -0x800000);
+ /* ba . + off */
+ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
+ }
} else {
val = 0x01000000;
}
- get_online_cpus();
mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
mutex_unlock(&text_mutex);
- put_online_cpus();
}
-
-#endif
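/*
 * Illustration (not part of the patch): the two branch encodings chosen above,
 * restated as a helper. WDISP19 ("ba,pt %xcc") reaches +/-1MB, WDISP22 ("ba")
 * reaches +/-8MB, and 0x01000000 is a nop; offsets are byte offsets and must be
 * word aligned. This helper is only a sketch, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t sparc_branch_insn(int32_t off, int enable)
{
	if (!enable)
		return 0x01000000;				/* nop */
	if (off >= -0x100000 && off <= 0xfffff)			/* ba,pt %xcc, . + off */
		return 0x10680000 | (((uint32_t)off >> 2) & 0x7ffff);
	return 0x10800000 | (((uint32_t)off >> 2) & 0x3fffff);	/* ba . + off */
}

int main(void)
{
	printf("0x%08x 0x%08x 0x%08x\n",
	       sparc_branch_insn(0x40, 1),	/* short forward branch */
	       sparc_branch_insn(-0x200000, 1),	/* too far for WDISP19, falls back to WDISP22 */
	       sparc_branch_insn(0, 0));	/* disabled: nop */
	return 0;
}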
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a702d9ab019c..8328a3b78a44 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC_KERNEL_H
#define __SPARC_KERNEL_H
#include <linux/interrupt.h>
+#include <linux/ftrace.h>
#include <asm/traps.h>
#include <asm/head.h>
@@ -12,74 +14,138 @@ extern const char *sparc_pmu_type;
extern unsigned int fsr_storage;
extern int ncpus_probed;
+/* process{_32,_64}.c */
+asmlinkage long sparc_clone(struct pt_regs *regs);
+asmlinkage long sparc_fork(struct pt_regs *regs);
+asmlinkage long sparc_vfork(struct pt_regs *regs);
+
#ifdef CONFIG_SPARC64
/* setup_64.c */
struct seq_file;
-extern void cpucap_info(struct seq_file *);
+void cpucap_info(struct seq_file *);
-static inline unsigned long kimage_addr_to_ra(const char *p)
+static inline unsigned long kimage_addr_to_ra(const void *p)
{
unsigned long val = (unsigned long) p;
return kern_base + (val - KERNBASE);
}
+
+/* sys_sparc_64.c */
+asmlinkage long sys_kern_features(void);
+
+/* unaligned_64.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+int handle_popc(u32 insn, struct pt_regs *regs);
+void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+
+/* uprobes.c */
+asmlinkage void uprobe_trap(struct pt_regs *regs,
+ unsigned long trap_level);
+
+/* smp_64.c */
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
+
+/* kgdb_64.c */
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs);
+
+/* pci.c */
+#ifdef CONFIG_PCI
+int ali_sound_dma_hack(struct device *dev, u64 device_mask);
+#else
+#define ali_sound_dma_hack(dev, mask) (0)
+#endif
+
+/* signal32.c */
+void do_sigreturn32(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
+void do_signal32(struct pt_regs * regs);
+asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
+
+/* time_64.c */
+void __init time_init_early(void);
+
+/* compat_audit.c */
+extern unsigned int sparc32_dir_class[];
+extern unsigned int sparc32_chattr_class[];
+extern unsigned int sparc32_write_class[];
+extern unsigned int sparc32_read_class[];
+extern unsigned int sparc32_signal_class[];
+int sparc32_classify_syscall(unsigned int syscall);
#endif
#ifdef CONFIG_SPARC32
/* setup_32.c */
+struct linux_romvec;
void sparc32_start_kernel(struct linux_romvec *rp);
/* cpu.c */
-extern void cpu_probe(void);
+void cpu_probe(void);
/* traps_32.c */
-extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
- unsigned long npc, unsigned long psr);
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr);
/* irq_32.c */
extern struct irqaction static_irqaction[];
extern int static_irq_count;
extern spinlock_t irq_action_lock;
-extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
-extern void init_IRQ(void);
+void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
/* sun4m_irq.c */
-extern void sun4m_init_IRQ(void);
-extern void sun4m_unmask_profile_irq(void);
-extern void sun4m_clear_profile_irq(int cpu);
+void sun4m_init_IRQ(void);
+void sun4m_unmask_profile_irq(void);
+void sun4m_clear_profile_irq(int cpu);
/* sun4m_smp.c */
void sun4m_cpu_pre_starting(void *arg);
void sun4m_cpu_pre_online(void *arg);
+void __init smp4m_boot_cpus(void);
+int smp4m_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4m_smp_done(void);
+void smp4m_cross_call_irq(void);
+void smp4m_percpu_timer_interrupt(struct pt_regs *regs);
/* sun4d_irq.c */
extern spinlock_t sun4d_imsk_lock;
-extern void sun4d_init_IRQ(void);
-extern int sun4d_request_irq(unsigned int irq,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname, void *dev_id);
-extern int show_sun4d_interrupts(struct seq_file *, void *);
-extern void sun4d_distribute_irqs(void);
-extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+void sun4d_init_IRQ(void);
+int sun4d_request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id);
+int show_sun4d_interrupts(struct seq_file *, void *);
+void sun4d_distribute_irqs(void);
+void sun4d_free_irq(unsigned int irq, void *dev_id);
/* sun4d_smp.c */
void sun4d_cpu_pre_starting(void *arg);
void sun4d_cpu_pre_online(void *arg);
+void __init smp4d_boot_cpus(void);
+int smp4d_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4d_smp_done(void);
+void smp4d_cross_call_irq(void);
+void smp4d_percpu_timer_interrupt(struct pt_regs *regs);
/* leon_smp.c */
void leon_cpu_pre_starting(void *arg);
void leon_cpu_pre_online(void *arg);
+void leonsmp_ipi_interrupt(void);
+void leon_cross_call_irq(void);
/* head_32.S */
extern unsigned int t_nmi[];
extern unsigned int linux_trap_ipi15_sun4d[];
extern unsigned int linux_trap_ipi15_sun4m[];
-extern struct tt_entry trapbase_cpu1;
-extern struct tt_entry trapbase_cpu2;
-extern struct tt_entry trapbase_cpu3;
+extern struct tt_entry trapbase[];
+extern struct tt_entry trapbase_cpu1[];
+extern struct tt_entry trapbase_cpu2[];
+extern struct tt_entry trapbase_cpu3[];
extern char cputypval[];
@@ -89,12 +155,42 @@ extern unsigned int real_irq_entry[];
extern unsigned int smp4d_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
-extern void floppy_hardint(void);
+void floppy_hardint(void);
/* trampoline_32.S */
extern unsigned long sun4m_cpu_startup;
extern unsigned long sun4d_cpu_startup;
+/* signal_32.c */
+asmlinkage void do_sigreturn(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn(struct pt_regs *regs);
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
+ unsigned long thread_info_flags);
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+ struct sigstack __user *ossptr,
+ unsigned long sp);
+
+/* ptrace_32.c */
+asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p);
+
+/* unaligned_32.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+
+/* windows.c */
+void try_to_clear_window_buffer(struct pt_regs *regs, int who);
+
+/* auxio_32.c */
+void __init auxio_probe(void);
+void __init auxio_power_probe(void);
+
+/* pcic.c */
+extern void __iomem *pcic_regs;
+void pcic_nmi(unsigned int pend, struct pt_regs *regs);
+
+/* time_32.c */
+void __init time_init(void);
+
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
#endif /* !(__SPARC_KERNEL_H) */
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index dcf210811af4..3b2c673ec627 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* kgdb.c: KGDB support for 32-bit sparc.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -12,7 +13,8 @@
#include <asm/irq.h>
#include <asm/cacheflush.h>
-extern unsigned long trapbase;
+#include "kernel.h"
+#include "entry.h"
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
@@ -35,7 +37,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
gdb_regs[GDB_Y] = regs->y;
gdb_regs[GDB_PSR] = regs->psr;
gdb_regs[GDB_WIM] = 0;
- gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
+ gdb_regs[GDB_TBR] = (unsigned long) &trapbase[0];
gdb_regs[GDB_PC] = regs->pc;
gdb_regs[GDB_NPC] = regs->npc;
gdb_regs[GDB_FSR] = 0;
@@ -70,7 +72,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[GDB_PSR] = t->kpsr;
gdb_regs[GDB_WIM] = t->kwim;
- gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
+ gdb_regs[GDB_TBR] = (unsigned long) &trapbase[0];
gdb_regs[GDB_PC] = t->kpc;
gdb_regs[GDB_NPC] = t->kpc + 4;
gdb_regs[GDB_FSR] = 0;
@@ -120,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fallthru */
+ fallthrough;
case 'D':
case 'k':
@@ -133,21 +135,19 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
return -1;
}
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-
-asmlinkage void kgdb_trap(struct pt_regs *regs)
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
unsigned long flags;
if (user_mode(regs)) {
- do_hw_interrupt(regs, 0xfd);
+ do_hw_interrupt(regs, trap_level);
return;
}
flushw_all();
local_irq_save(flags);
- kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+ kgdb_handle_exception(trap_level, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
@@ -166,7 +166,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
regs->npc = regs->pc + 4;
}
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
};
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index c8759550799f..177746ae2c81 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* kgdb.c: KGDB support for 64-bit sparc.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -6,12 +7,15 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include "kernel.h"
+
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window *win;
@@ -42,7 +46,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
extern unsigned int switch_to_pc;
- extern unsigned int ret_from_syscall;
+ extern unsigned int ret_from_fork;
struct reg_window *win;
unsigned long pc, cwp;
int i;
@@ -66,7 +70,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[i] = 0;
if (t->new_child)
- pc = (unsigned long) &ret_from_syscall;
+ pc = (unsigned long) &ret_from_fork;
else
pc = (unsigned long) &switch_to_pc;
@@ -144,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fallthru */
+ fallthrough;
case 'D':
case 'k':
@@ -159,11 +163,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
unsigned long flags;
if (user_mode(regs)) {
bad_trap(regs, trap_level);
- return;
+ goto out;
}
flushw_all();
@@ -171,6 +176,8 @@ asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
local_irq_save(flags);
kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
local_irq_restore(flags);
+out:
+ exception_exit(prev_state);
}
int kgdb_arch_init(void)
@@ -188,7 +195,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
regs->tnpc = regs->tpc + 4;
}
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
};
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index e72212148d2a..191bbaca9921 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/kprobes.c
*
* Copyright (C) 2004 David S. Miller <davem@davemloft.net>
@@ -5,12 +6,13 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/context_tracking.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* We do not have hardware single-stepping on sparc64.
* So we implement software single-stepping with breakpoint
@@ -82,7 +84,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
@@ -91,7 +93,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
kcb->kprobe_orig_tnpc = regs->tnpc;
kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
@@ -145,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
- } else {
- if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
- ret = 1;
- goto no_kprobe;
- }
- p = __get_cpu_var(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ ret = 1;
}
goto no_kprobe;
}
@@ -179,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -232,7 +230,7 @@ static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
return regs->tnpc;
}
-/* If INSN is an instruction which writes it's PC location
+/* If INSN is an instruction which writes its PC location
* into a destination register, fix that up.
*/
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
@@ -348,23 +346,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
@@ -418,12 +399,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
BUG_ON(trap_level != 0x170 && trap_level != 0x171);
if (user_mode(regs)) {
local_irq_enable();
bad_trap(regs, trap_level);
- return;
+ goto out;
}
/* trap_level == 0x170 --> ta 0x70
@@ -433,53 +416,8 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
(trap_level == 0x170) ? "debug" : "debug_2",
regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
bad_trap(regs, trap_level);
-}
-
-/* Jprobes support. */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
- regs->tpc = (unsigned long) jp->entry;
- regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
- regs->tstate |= TSTATE_PIL;
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- register unsigned long orig_fp asm("g1");
-
- orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
- __asm__ __volatile__("\n"
-"1: cmp %%sp, %0\n\t"
- "blu,a,pt %%xcc, 1b\n\t"
- " restore\n\t"
- ".globl jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
- "ta 0x70"
- : /* no outputs */
- : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- u32 *addr = (u32 *) regs->tpc;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (addr == (u32 *) jprobe_return_trap_instruction) {
- memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
+out:
+ exception_exit(prev_state);
}
/* The value stored in the return address register is actually 2
@@ -498,71 +436,25 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->u_regs[UREG_RETPC] =
- ((unsigned long)kretprobe_trampoline) - 8;
+ ((unsigned long)__kretprobe_trampoline) - 8;
}
/*
* Called when the probe at kretprobe trampoline is hit
*/
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
+ unsigned long orig_ret_address = 0;
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- reset_current_kprobe();
- kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
@@ -571,15 +463,15 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
{
- asm volatile(".global kretprobe_trampoline\n"
- "kretprobe_trampoline:\n"
+ asm volatile(".global __kretprobe_trampoline\n"
+ "__kretprobe_trampoline:\n"
"\tnop\n"
"\tnop\n");
}
static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
@@ -590,7 +482,7 @@ int __init arch_init_kprobes(void)
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
- if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
return 1;
return 0;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 53dfb92e09fb..b3c5e8f2443a 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KSTACK_H
#define _KSTACK_H
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 0746e5e32b37..6bfaf73ce8a0 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
*
* Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
@@ -6,10 +7,10 @@
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tsb.h>
.text
@@ -20,16 +21,19 @@ kvmap_itlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_IMMU, %g4
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g4 here.
+ */
+
/* sun4v_itlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
kvmap_itlb_4v:
-kvmap_itlb_nonlinear:
/* Catch kernel NULL pointer calls. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
- bleu,pn %xcc, kvmap_dtlb_longpath
+ blu,pn %xcc, kvmap_itlb_longpath
nop
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
@@ -48,14 +52,6 @@ kvmap_itlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
-
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_itlb_longpath
- TSB_STORE(%g1, %g7)
-
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
@@ -119,6 +115,12 @@ kvmap_dtlb_obp:
ba,pt %xcc, kvmap_dtlb_load
nop
+kvmap_linear_early:
+ sethi %hi(kern_linear_pte_xor), %g7
+ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
+ ba,pt %xcc, kvmap_dtlb_tsb4m_load
+ xor %g2, %g4, %g5
+
.align 32
kvmap_dtlb_tsb4m_load:
TSB_LOCK_TAG(%g1, %g2, %g7)
@@ -131,6 +133,10 @@ kvmap_dtlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g4
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g4 here.
+ */
+
/* sun4v_dtlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
@@ -147,85 +153,17 @@ kvmap_dtlb_4v:
/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
- /* TSB entry address left in %g1, lookup linear PTE.
- * Must preserve %g1 and %g6 (TAG).
- */
-kvmap_dtlb_tsb4m_miss:
- /* Clear the PAGE_OFFSET top virtual bits, shift
- * down to get PFN, and make sure PFN is in range.
- */
- sllx %g4, 21, %g5
-
- /* Check to see if we know about valid memory at the 4MB
- * chunk this physical address will reside within.
- */
- srlx %g5, 21 + 41, %g2
- brnz,pn %g2, kvmap_dtlb_longpath
- nop
-
- /* This unconditional branch and delay-slot nop gets patched
- * by the sethi sequence once the bitmap is properly setup.
+ /* Linear mapping TSB lookup failed. Fallthrough to kernel
+ * page table based lookup.
*/
- .globl valid_addr_bitmap_insn
-valid_addr_bitmap_insn:
- ba,pt %xcc, 2f
- nop
- .subsection 2
- .globl valid_addr_bitmap_patch
-valid_addr_bitmap_patch:
- sethi %hi(sparc64_valid_addr_bitmap), %g7
- or %g7, %lo(sparc64_valid_addr_bitmap), %g7
- .previous
-
- srlx %g5, 21 + 22, %g2
- srlx %g2, 6, %g5
- and %g2, 63, %g2
- sllx %g5, 3, %g5
- ldx [%g7 + %g5], %g5
- mov 1, %g7
- sllx %g7, %g2, %g7
- andcc %g5, %g7, %g0
- be,pn %xcc, kvmap_dtlb_longpath
-
-2: sethi %hi(kpte_linear_bitmap), %g2
-
- /* Get the 256MB physical address index. */
- sllx %g4, 21, %g5
- or %g2, %lo(kpte_linear_bitmap), %g2
- srlx %g5, 21 + 28, %g5
- and %g5, (32 - 1), %g7
-
- /* Divide by 32 to get the offset into the bitmask. */
- srlx %g5, 5, %g5
- add %g7, %g7, %g7
- sllx %g5, 3, %g5
-
- /* kern_linear_pte_xor[(mask >> shift) & 3)] */
- ldx [%g2 + %g5], %g2
- srlx %g2, %g7, %g7
- sethi %hi(kern_linear_pte_xor), %g5
- and %g7, 3, %g7
- or %g5, %lo(kern_linear_pte_xor), %g5
- sllx %g7, 3, %g7
- ldx [%g5 + %g7], %g2
-
.globl kvmap_linear_patch
kvmap_linear_patch:
- ba,pt %xcc, kvmap_dtlb_tsb4m_load
- xor %g2, %g4, %g5
+ ba,a,pt %xcc, kvmap_linear_early
kvmap_dtlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
-
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_dtlb_longpath
- TSB_STORE(%g1, %g7)
-
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
@@ -257,13 +195,8 @@ kvmap_dtlb_load:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
- sub %g4, %g5, %g5
- srlx %g5, 22, %g5
- sethi %hi(vmemmap_table), %g1
- sllx %g5, 3, %g5
- or %g1, %lo(vmemmap_table), %g1
- ba,pt %xcc, kvmap_dtlb_load
- ldx [%g1 + %g5], %g5
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+ ba,a,pt %xcc, kvmap_dtlb_load
#endif
kvmap_dtlb_nonlinear:
@@ -275,8 +208,8 @@ kvmap_dtlb_nonlinear:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
- mov (VMEMMAP_BASE >> 40), %g5
- sllx %g5, 40, %g5
+ sethi %hi(VMEMMAP_BASE), %g5
+ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
cmp %g4,%g5
bgeu,pn %xcc, kvmap_vmemmap
nop
@@ -288,8 +221,8 @@ kvmap_dtlb_tsbmiss:
sethi %hi(MODULES_VADDR), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_longpath
- mov (VMALLOC_END >> 40), %g5
- sllx %g5, 40, %g5
+ sethi %hi(VMALLOC_END), %g5
+ ldx [%g5 + %lo(VMALLOC_END)], %g5
cmp %g4, %g5
bgeu,pn %xcc, kvmap_dtlb_longpath
nop
@@ -327,6 +260,10 @@ kvmap_dtlb_longpath:
nop
.previous
+ /* The kernel executes in context zero, therefore we do not
+ * need to clear the context ID bits out of %g5 here.
+ */
+
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 54df554b82d9..7f3cdb6f644d 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ldc.c: Logical Domain Channel link-layer protocol driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
@@ -15,6 +16,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
+#include <asm/iommu-common.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -27,9 +29,12 @@
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
+#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
+#define COOKIE_PGSZ_CODE_SHIFT 60ULL
+
+
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
@@ -98,10 +103,10 @@ static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
- /* Protects arena alloc/free. */
+ /* Protects ldc_unmap. */
spinlock_t lock;
- struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
+ struct iommu_map_table iommu_map_table;
};
struct ldc_channel {
@@ -173,6 +178,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
+#define LDC_ABORT(lp) ldc_abort((lp), __func__)
+
static const char *state_to_str(u8 state)
{
switch (state) {
@@ -191,15 +198,6 @@ static const char *state_to_str(u8 state)
}
}
-static void ldc_set_state(struct ldc_channel *lp, u8 state)
-{
- ldcdbg(STATE, "STATE (%s) --> (%s)\n",
- state_to_str(lp->state),
- state_to_str(state));
-
- lp->state = state;
-}
-
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
@@ -511,11 +509,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
return err;
}
-static int ldc_abort(struct ldc_channel *lp)
+static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
unsigned long hv_err;
- ldcdbg(STATE, "ABORT\n");
+ ldcdbg(STATE, "ABORT[%s]\n", msg);
+ ldc_print(lp);
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
@@ -600,7 +599,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
}
}
if (err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -613,13 +612,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -630,17 +629,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
vap = find_by_major(vp->major);
if (!vap)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return send_tx_packet(lp, p, new_tail);
}
@@ -663,7 +662,7 @@ static int process_version(struct ldc_channel *lp,
return process_ver_nack(lp, vp);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
@@ -676,13 +675,13 @@ static int process_rts(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -695,7 +694,7 @@ static int process_rtr(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
@@ -718,7 +717,7 @@ static int process_rdx(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rcv_nxt = p->seqid;
@@ -745,14 +744,14 @@ static int process_control_frame(struct ldc_channel *lp,
return process_rdx(lp, p);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
static int process_data_ack(struct ldc_channel *lp,
@@ -771,7 +770,7 @@ static int process_data_ack(struct ldc_channel *lp,
return 0;
}
if (head == lp->tx_tail)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
return 0;
@@ -815,16 +814,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
-
- orig_state = lp->chan_state;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
- (void) __set_rx_head(lp, lp->rx_tail);
+ (void) ldc_rx_reset(lp);
goto out;
}
@@ -875,7 +879,7 @@ handshake_complete:
break;
default:
- err = ldc_abort(lp);
+ err = LDC_ABORT(lp);
break;
}
@@ -890,7 +894,7 @@ handshake_complete:
err = __set_rx_head(lp, new);
if (err < 0) {
- (void) ldc_abort(lp);
+ (void) LDC_ABORT(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
@@ -931,7 +935,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -998,31 +1009,59 @@ static void free_queue(unsigned long num_entries, struct ldc_packet *q)
free_pages((unsigned long)q, order);
}
+static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
+{
+ u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
+ /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
+
+ cookie &= ~COOKIE_PGSZ_CODE;
+
+ return (cookie >> (13ULL + (szcode * 3ULL)));
+}
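/*
 * Illustration (not part of the patch): how a cookie is split above. The top
 * nibble (bits 63..60) is a page-size code, the page size is 8K << (3 * code),
 * and the remaining bits shifted down by the page shift give the map-table
 * index. The sample cookie value is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

int main(void)
{
	uint64_t cookie = (1ULL << COOKIE_PGSZ_CODE_SHIFT) | 0x40000ULL; /* code 1, offset 0x40000 */
	uint64_t szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
	uint64_t index = (cookie & ~COOKIE_PGSZ_CODE) >> (13 + 3 * szcode);

	printf("pgsz=%lluK index=%llu\n",
	       (unsigned long long)(8ULL << (3 * szcode)),	/* 8K << 3 = 64K pages */
	       (unsigned long long)index);			/* 0x40000 >> 16 = 4 */
	return 0;
}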
+
+static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
+ unsigned long entry, unsigned long npages)
+{
+ struct ldc_mtable_entry *base;
+ unsigned long i, shift;
+
+ shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
+ base = iommu->page_table + entry;
+ for (i = 0; i < npages; i++) {
+ if (base->cookie)
+ sun4v_ldc_revoke(id, cookie + (i << shift),
+ base->cookie);
+ base->mte = 0;
+ }
+}
+
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
-static int ldc_iommu_init(struct ldc_channel *lp)
+static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
-
- spin_lock_init(&iommu->lock);
+ spin_lock_init(&ldc_iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
-
- iommu->arena.limit = num_tsb_entries;
+ iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
+ NULL, false /* no large pool */,
+ 1 /* npools */,
+ true /* skip span boundary check */);
order = get_order(tsbsize);
@@ -1037,7 +1076,7 @@ static int ldc_iommu_init(struct ldc_channel *lp)
memset(table, 0, PAGE_SIZE << order);
- iommu->page_table = table;
+ ldc_iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
@@ -1049,36 +1088,38 @@ static int ldc_iommu_init(struct ldc_channel *lp)
out_free_table:
free_pages((unsigned long) table, order);
- iommu->page_table = NULL;
+ ldc_iommu->page_table = NULL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
- num_tsb_entries = iommu->arena.limit;
+ num_tsb_entries = iommu->poolsize * iommu->nr_pools;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
- free_pages((unsigned long) iommu->page_table, order);
- iommu->page_table = NULL;
+ free_pages((unsigned long) ldc_iommu->page_table, order);
+ ldc_iommu->page_table = NULL;
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
- void *event_arg)
+ void *event_arg,
+ const char *name)
{
struct ldc_channel *lp;
const struct ldc_mode_ops *mops;
@@ -1093,6 +1134,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
err = -EINVAL;
if (!cfgp)
goto out_err;
+ if (!name)
+ goto out_err;
switch (cfgp->mode) {
case LDC_MODE_RAW:
@@ -1137,7 +1180,7 @@ struct ldc_channel *ldc_alloc(unsigned long id,
lp->id = id;
- err = ldc_iommu_init(lp);
+ err = ldc_iommu_init(name, lp);
if (err)
goto out_free_ldc;
@@ -1185,6 +1228,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
INIT_HLIST_HEAD(&lp->mh_list);
+ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+
+ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
+ lp->rx_irq_name, lp);
+ if (err)
+ goto out_free_txq;
+
+ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
+ lp->tx_irq_name, lp);
+ if (err) {
+ free_irq(lp->cfg.rx_irq, lp);
+ goto out_free_txq;
+ }
+
return lp;
out_free_txq:
@@ -1204,11 +1262,12 @@ out_err:
}
EXPORT_SYMBOL(ldc_alloc);
-void ldc_free(struct ldc_channel *lp)
+void ldc_unbind(struct ldc_channel *lp)
{
if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
free_irq(lp->cfg.rx_irq, lp);
free_irq(lp->cfg.tx_irq, lp);
+ lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
}
if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
@@ -1222,10 +1281,15 @@ void ldc_free(struct ldc_channel *lp)
lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
}
- hlist_del(&lp->list);
+ ldc_set_state(lp, LDC_STATE_INIT);
+}
+EXPORT_SYMBOL(ldc_unbind);
+void ldc_free(struct ldc_channel *lp)
+{
+ ldc_unbind(lp);
+ hlist_del(&lp->list);
kfree(lp->mssbuf);
-
ldc_iommu_release(lp);
kfree(lp);
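The ldc.c lifecycle changes in this diff move RX/TX IRQ registration into ldc_alloc() (which now takes a name), drop the name argument from ldc_bind(), and split ldc_unbind() out of ldc_free(). A minimal consumer sketch under those assumptions; the port name, config argument and probe function below are hypothetical, not taken from an in-tree driver:

#include <linux/err.h>
#include <asm/ldc.h>

static struct ldc_channel *example_chan;

static int example_port_probe(unsigned long chan_id,
			      const struct ldc_channel_config *cfg,
			      void *priv)
{
	int err;

	/* RX/TX IRQs are now requested inside ldc_alloc(), keyed by name. */
	example_chan = ldc_alloc(chan_id, cfg, priv, "hypothetical-port");
	if (IS_ERR(example_chan))
		return PTR_ERR(example_chan);

	/* ldc_bind() no longer takes the name argument. */
	err = ldc_bind(example_chan);
	if (err) {
		ldc_free(example_chan);	/* ldc_free() now unbinds first */
		example_chan = NULL;
		return err;
	}
	return 0;
}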
@@ -1237,31 +1301,14 @@ EXPORT_SYMBOL(ldc_free);
* state. This does not initiate a handshake, ldc_connect() does
* that.
*/
-int ldc_bind(struct ldc_channel *lp, const char *name)
+int ldc_bind(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err = -EINVAL;
- if (!name ||
- (lp->state != LDC_STATE_INIT))
+ if (lp->state != LDC_STATE_INIT)
return -EINVAL;
- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
-
- err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED,
- lp->rx_irq_name, lp);
- if (err)
- return err;
-
- err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED,
- lp->tx_irq_name, lp);
- if (err) {
- free_irq(lp->cfg.rx_irq, lp);
- return err;
- }
-
-
spin_lock_irqsave(&lp->lock, flags);
enable_irq(lp->cfg.rx_irq);
@@ -1301,6 +1348,14 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
+ if (lp->cfg.mode == LDC_MODE_RAW) {
+ /*
+	 * There is no handshake in RAW mode, so the handshake
+	 * is considered complete.
+ */
+ lp->hs_state = LDC_HS_COMPLETE;
+ }
+
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1336,7 +1391,7 @@ int ldc_connect(struct ldc_channel *lp)
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
- err = -EINVAL;
+ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
else
err = start_handshake(lp);
@@ -1406,12 +1461,56 @@ int ldc_state(struct ldc_channel *lp)
}
EXPORT_SYMBOL(ldc_state);
+void ldc_set_state(struct ldc_channel *lp, u8 state)
+{
+ ldcdbg(STATE, "STATE (%s) --> (%s)\n",
+ state_to_str(lp->state),
+ state_to_str(state));
+
+ lp->state = state;
+}
+EXPORT_SYMBOL(ldc_set_state);
+
+int ldc_mode(struct ldc_channel *lp)
+{
+ return lp->cfg.mode;
+}
+EXPORT_SYMBOL(ldc_mode);
+
+int ldc_rx_reset(struct ldc_channel *lp)
+{
+ return __set_rx_head(lp, lp->rx_tail);
+}
+EXPORT_SYMBOL(ldc_rx_reset);
+
+void __ldc_print(struct ldc_channel *lp, const char *caller)
+{
+ pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
+ "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
+ "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
+ "\trcv_nxt=%u snd_nxt=%u\n",
+ caller, lp->id, lp->flags, state_to_str(lp->state),
+ lp->chan_state, lp->hs_state,
+ lp->rx_head, lp->rx_tail, lp->rx_num_entries,
+ lp->tx_head, lp->tx_tail, lp->tx_num_entries,
+ lp->rcv_nxt, lp->snd_nxt);
+}
+EXPORT_SYMBOL(__ldc_print);
+
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
- unsigned long new_tail;
+ unsigned long new_tail, hv_err;
int err;
+ hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
+ &lp->chan_state);
+ if (unlikely(hv_err))
+ return -EBUSY;
+
+ if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
+ return LDC_ABORT(lp);
+
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
@@ -1442,7 +1541,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1485,7 +1584,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf,
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
@@ -1551,9 +1650,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
if (err)
return err;
- err = __set_rx_head(lp, lp->rx_tail);
+ err = ldc_rx_reset(lp);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1566,7 +1665,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
return err;
}
if (p->stype & LDC_NACK)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1586,7 +1685,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1609,7 +1708,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head)
int err = __set_rx_head(lp, head);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rx_head = head;
return 0;
@@ -1648,7 +1747,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1692,9 +1791,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
@@ -1750,7 +1854,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
* This seems the best behavior because this allows
* a user of the LDC layer to start with a small
* RX buffer for ldc_read() calls and use -EMSGSIZE
- * as a cue to enlarge it's read buffer.
+ * as a cue to enlarge its read buffer.
*/
err = -EMSGSIZE;
break;
@@ -1859,6 +1963,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
unsigned long flags;
int err;
+ ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
+
if (!buf)
return -EINVAL;
@@ -1874,44 +1980,13 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
spin_unlock_irqrestore(&lp->lock, flags);
+ ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
+ lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
+
return err;
}
EXPORT_SYMBOL(ldc_read);
-static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long n, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
- bitmap_set(arena->map, n, npages);
-
- arena->hint = end;
-
- return n;
-}
-
-#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
-#define COOKIE_PGSZ_CODE_SHIFT 60ULL
-
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
@@ -1938,24 +2013,15 @@ static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
page_offset);
}
-static u64 cookie_to_index(u64 cookie, unsigned long *shift)
-{
- u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
-
- cookie &= ~COOKIE_PGSZ_CODE;
-
- *shift = szcode * 3;
-
- return (cookie >> (13ULL + (szcode * 3ULL)));
-}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
- entry = arena_alloc(iommu, npages);
- if (unlikely(entry < 0))
+ entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
+ npages, NULL, (unsigned long)-1, 0);
+ if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
@@ -2083,11 +2149,12 @@ int ldc_map_sg(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long i, npages, flags;
+ unsigned long i, npages;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
int err;
+ struct scatterlist *s;
if (map_perm & ~LDC_MAP_ALL)
return -EINVAL;
@@ -2102,9 +2169,7 @@ int ldc_map_sg(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2116,9 +2181,10 @@ int ldc_map_sg(struct ldc_channel *lp,
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
- for (i = 0; i < num_sg; i++)
- fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
- sg[i].offset, sg[i].length);
+ for_each_sg(sg, s, num_sg, i) {
+ fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
+ s->offset, s->length);
+ }
return state.nc;
}
@@ -2129,7 +2195,7 @@ int ldc_map_single(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long npages, pa, flags;
+ unsigned long npages, pa;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2145,9 +2211,7 @@ int ldc_map_single(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2159,41 +2223,31 @@ int ldc_map_single(struct ldc_channel *lp,
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
- BUG_ON(state.nc != 1);
+ BUG_ON(state.nc > ncookies);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
+
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, shift, index, npages;
- struct ldc_mtable_entry *base;
+ unsigned long npages, entry;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
- index = cookie_to_index(cookie, &shift);
- base = iommu->page_table + index;
- BUG_ON(index > arena->limit ||
- (index + npages) > arena->limit);
-
- for (i = 0; i < npages; i++) {
- if (base->cookie)
- sun4v_ldc_revoke(id, cookie + (i << shift),
- base->cookie);
- base->mte = 0;
- __clear_bit(index + i, arena->map);
- }
+ entry = ldc_cookie_to_index(cookie, iommu);
+ ldc_demap(iommu, id, cookie, entry, npages);
+ iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
- unsigned long flags;
int i;
+ unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
@@ -2306,7 +2360,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);
- buf = kzalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return ERR_PTR(-ENOMEM);
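Earlier in this file's diff, ldc.c drops its private bitmap arena (arena_alloc(), cookie_to_index(), and the caller-side spinlock around alloc_npages()) in favour of the generic iommu-common pool allocator set up with iommu_tbl_pool_init(). A sketch of the allocate/free pairing in isolation; the function names are illustrative, and the header lives in <asm/iommu-common.h> on kernels where the code sits under arch/sparc, <linux/iommu-common.h> on older ones:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/iommu-common.h>	/* <linux/iommu-common.h> before the move */

static long example_alloc_entries(struct iommu_map_table *tbl,
				  unsigned long npages)
{
	long entry;

	/* The pool allocator does its own per-pool locking, so no
	 * caller-side spinlock is needed any more. */
	entry = iommu_tbl_range_alloc(NULL, tbl, npages, NULL,
				      (unsigned long)-1, 0);
	if (entry == IOMMU_ERROR_CODE)
		return -ENOMEM;

	return entry;
}

static void example_free_entries(struct iommu_map_table *tbl, u64 cookie,
				 unsigned long npages, unsigned long entry)
{
	iommu_tbl_range_free(tbl, cookie, npages, entry);
}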
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index 3ae36f36e758..f4fb82b019bb 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -8,6 +9,7 @@
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
+#include <linux/sched/loadavg.h>
#include <asm/auxio.h>
@@ -30,23 +32,25 @@ static inline void led_toggle(void)
}
static struct timer_list led_blink_timer;
+static unsigned long led_blink_timer_timeout;
-static void led_blink(unsigned long timeout)
+static void led_blink(struct timer_list *unused)
{
+ unsigned long timeout = led_blink_timer_timeout;
+
led_toggle();
/* reschedule */
if (!timeout) { /* blink according to load */
led_blink_timer.expires = jiffies +
((1 + (avenrun[0] >> FSHIFT)) * HZ);
- led_blink_timer.data = 0;
} else { /* blink at user specified interval */
led_blink_timer.expires = jiffies + (timeout * HZ);
- led_blink_timer.data = timeout;
}
add_timer(&led_blink_timer);
}
+#ifdef CONFIG_PROC_FS
static int led_proc_show(struct seq_file *m, void *v)
{
if (get_auxio() & AUXIO_LED)
@@ -69,16 +73,9 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
if (count > LED_MAX_LENGTH)
count = LED_MAX_LENGTH;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- kfree(buf);
- return -EFAULT;
- }
-
- buf[count] = '\0';
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
/* work around \n when echo'ing into proc */
if (buf[count - 1] == '\n')
@@ -87,16 +84,18 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
/* before we change anything we want to stop any running timers,
* otherwise calls such as on will have no persistent effect
*/
- del_timer_sync(&led_blink_timer);
+ timer_delete_sync(&led_blink_timer);
if (!strcmp(buf, "on")) {
auxio_set_led(AUXIO_LED_ON);
} else if (!strcmp(buf, "toggle")) {
led_toggle();
} else if ((*buf > '0') && (*buf <= '9')) {
- led_blink(simple_strtoul(buf, NULL, 10));
+ led_blink_timer_timeout = simple_strtoul(buf, NULL, 10);
+ led_blink(&led_blink_timer);
} else if (!strcmp(buf, "load")) {
- led_blink(0);
+ led_blink_timer_timeout = 0;
+ led_blink(&led_blink_timer);
} else {
auxio_set_led(AUXIO_LED_OFF);
}
@@ -106,28 +105,25 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
return count;
}
-static const struct file_operations led_proc_fops = {
- .owner = THIS_MODULE,
- .open = led_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = led_proc_write,
+static const struct proc_ops led_proc_ops = {
+ .proc_open = led_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = led_proc_write,
};
-
-static struct proc_dir_entry *led;
+#endif
#define LED_VERSION "0.1"
static int __init led_init(void)
{
- init_timer(&led_blink_timer);
- led_blink_timer.function = led_blink;
+ timer_setup(&led_blink_timer, led_blink, 0);
- led = proc_create("led", 0, NULL, &led_proc_fops);
- if (!led)
+#ifdef CONFIG_PROC_FS
+ if (!proc_create("led", 0, NULL, &led_proc_ops))
return -ENOMEM;
-
+#endif
printk(KERN_INFO
"led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n",
LED_VERSION);
@@ -138,7 +134,7 @@ static int __init led_init(void)
static void __exit led_exit(void)
{
remove_proc_entry("led", NULL);
- del_timer_sync(&led_blink_timer);
+ timer_delete_sync(&led_blink_timer);
}
module_init(led_init);
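The led.c hunks above replace the open-coded kmalloc()+copy_from_user() pair with memdup_user_nul() and convert the blink timer from init_timer()/.function/.data to timer_setup(), with the old ->data value carried in a static variable. A minimal sketch of that timer conversion pattern, with illustrative names:

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;
static unsigned long example_interval;	/* replaces the old ->data field */

static void example_timer_fn(struct timer_list *unused)
{
	/* ... periodic work ... */
	mod_timer(&example_timer, jiffies + example_interval * HZ);
}

static int __init example_init(void)
{
	timer_setup(&example_timer, example_timer_fn, 0);
	example_interval = 1;
	mod_timer(&example_timer, jiffies + example_interval * HZ);
	return 0;
}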
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index b7c68976cbc7..a43cf794bb1e 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
@@ -7,9 +8,7 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -32,12 +31,13 @@ struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base addr
int leondebug_irq_disable;
int leon_debug_irqout;
-static int dummy_master_l10_counter;
+static volatile u32 dummy_master_l10_counter;
unsigned long amba_system_id;
static DEFINE_SPINLOCK(leon_irq_lock);
+static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
+static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
-unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
unsigned int sparc_leon_eirq;
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -52,7 +52,7 @@ static inline unsigned int leon_eirq_get(int cpu)
}
/* Handle one or multiple IRQs from the extended interrupt controller */
-static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+static void leon_handle_ext_irq(struct irq_desc *desc)
{
unsigned int eirq;
struct irq_bucket *p;
@@ -65,7 +65,7 @@ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
}
/* The extended IRQ controller has been found, this function registers it */
-void leon_eirq_setup(unsigned int eirq)
+static void leon_eirq_setup(unsigned int eirq)
{
unsigned long mask, oldmask;
unsigned int veirq;
@@ -106,13 +106,12 @@ unsigned long leon_get_irqmask(unsigned int irq)
#ifdef CONFIG_SMP
static int irq_choose_cpu(const struct cpumask *affinity)
{
- cpumask_t mask;
+ unsigned int cpu = cpumask_first_and(affinity, cpu_online_mask);
- cpumask_and(&mask, cpu_online_mask, affinity);
- if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
+ if (cpumask_subset(cpu_online_mask, affinity) || cpu >= nr_cpu_ids)
return boot_cpu_id;
else
- return cpumask_first(&mask);
+ return cpu;
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
@@ -125,7 +124,7 @@ static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
int oldcpu, newcpu;
mask = (unsigned long)data->chip_data;
- oldcpu = irq_choose_cpu(data->affinity);
+ oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
newcpu = irq_choose_cpu(dest);
if (oldcpu == newcpu)
@@ -148,7 +147,7 @@ static void leon_unmask_irq(struct irq_data *data)
int cpu;
mask = (unsigned long)data->chip_data;
- cpu = irq_choose_cpu(data->affinity);
+ cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
spin_lock_irqsave(&leon_irq_lock, flags);
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
@@ -161,7 +160,7 @@ static void leon_mask_irq(struct irq_data *data)
int cpu;
mask = (unsigned long)data->chip_data;
- cpu = irq_choose_cpu(data->affinity);
+ cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
spin_lock_irqsave(&leon_irq_lock, flags);
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
@@ -202,7 +201,7 @@ static struct irq_chip leon_irq = {
/*
* Build a LEON IRQ for the edge triggered LEON IRQ controller:
- * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack
+ * Edge (normal) IRQ - handle_simple_irq, ack=DON'T-CARE, never ack
* Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR
* Per-CPU Edge - handle_percpu_irq, ack=0
*/
@@ -260,17 +259,25 @@ void leon_update_virq_handling(unsigned int virq,
static u32 leon_cycles_offset(void)
{
- u32 rld, val, off;
+ u32 rld, val, ctrl, off;
+
rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
- off = rld - val;
- return rld - val;
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) {
+ val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
+ off = 2 * rld - val;
+ } else {
+ off = rld - val;
+ }
+
+ return off;
}
#ifdef CONFIG_SMP
/* smp clockevent irq */
-irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
+static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
struct clock_event_device *ce;
int cpu = smp_processor_id();
@@ -302,6 +309,7 @@ void __init leon_init_timers(void)
int ampopts;
int err;
u32 config;
+ u32 ctrl;
sparc_config.get_cycles_offset = leon_cycles_offset;
sparc_config.cs_period = 1000000 / HZ;
@@ -313,7 +321,7 @@ void __init leon_init_timers(void)
leondebug_irq_disable = 0;
leon_debug_irqout = 0;
- master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
+ master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
rootnp = of_find_node_by_path("/ambapp0");
@@ -339,41 +347,51 @@ void __init leon_init_timers(void)
/* Find GPTIMER Timer Registers base address otherwise bail out. */
nnp = rootnp;
- do {
- np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
- if (!np) {
- np = of_find_node_by_name(nnp, "01_011");
- if (!np)
- goto bad;
- }
- ampopts = 0;
- pp = of_find_property(np, "ampopts", &len);
- if (pp) {
- ampopts = *(int *)pp->value;
- if (ampopts == 0) {
- /* Skip this instance, resource already
- * allocated by other OS */
- nnp = np;
- continue;
- }
+retry:
+ np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
+ if (!np) {
+ np = of_find_node_by_name(nnp, "01_011");
+ if (!np)
+ goto bad;
+ }
+
+ ampopts = 0;
+ pp = of_find_property(np, "ampopts", &len);
+ if (pp) {
+ ampopts = *(int *)pp->value;
+ if (ampopts == 0) {
+ /* Skip this instance, resource already
+ * allocated by other OS */
+ nnp = np;
+ goto retry;
}
+ }
- /* Select Timer-Instance on Timer Core. Default is zero */
- leon3_gptimer_idx = ampopts & 0x7;
+ /* Select Timer-Instance on Timer Core. Default is zero */
+ leon3_gptimer_idx = ampopts & 0x7;
- pp = of_find_property(np, "reg", &len);
- if (pp)
- leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
- pp->value;
- pp = of_find_property(np, "interrupts", &len);
- if (pp)
- leon3_gptimer_irq = *(unsigned int *)pp->value;
- } while (0);
+ pp = of_find_property(np, "reg", &len);
+ if (pp)
+ leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
+ pp->value;
+ pp = of_find_property(np, "interrupts", &len);
+ if (pp)
+ leon3_gptimer_irq = *(unsigned int *)pp->value;
if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
goto bad;
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ ctrl | LEON3_GPTIMER_CTRL_PENDING);
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+
+ if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0)
+ leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING;
+ else
+ leon3_gptimer_ackmask = ~0;
+
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
(((1000000 / HZ) - 1)));
@@ -452,24 +470,15 @@ bad:
static void leon_clear_clock_irq(void)
{
-}
+ u32 ctrl;
-static void leon_load_profile_irq(int cpu, unsigned int limit)
-{
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ ctrl & leon3_gptimer_ackmask);
}
-void __init leon_trans_init(struct device_node *dp)
+static void leon_load_profile_irq(int cpu, unsigned int limit)
{
- if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
- struct property *p;
- p = of_find_property(dp, "mid", (void *)0);
- if (p) {
- int mid;
- dp->name = prom_early_alloc(5 + 1);
- memcpy(&mid, p->value, p->length);
- sprintf((char *)dp->name, "cpu%.2d", mid);
- }
- }
}
#ifdef CONFIG_SMP
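The irq_choose_cpu() rewrite above avoids building a temporary cpumask_t on the stack by using cpumask_first_and() directly. The same logic, extracted with an explicit fallback parameter standing in for boot_cpu_id (names are illustrative):

#include <linux/cpumask.h>

static int example_choose_cpu(const struct cpumask *affinity, int fallback)
{
	unsigned int cpu = cpumask_first_and(affinity, cpu_online_mask);

	/* If the affinity mask covers every online CPU, or contains no
	 * online CPU at all, fall back to the default CPU. */
	if (cpumask_subset(cpu_online_mask, affinity) || cpu >= nr_cpu_ids)
		return fallback;

	return cpu;
}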
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 88aaaa57bb64..10934dfa987a 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* leon_pci.c: LEON Host PCI support
*
@@ -6,7 +7,8 @@
* Code is partially derived from pcic.c
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/export.h>
@@ -25,6 +27,12 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
{
LIST_HEAD(resources);
struct pci_bus *root_bus;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ bridge = pci_alloc_host_bridge(0);
+ if (!bridge)
+ return;
pci_add_resource_offset(&resources, &info->io_space,
info->io_space.start - 0x1000);
@@ -32,153 +40,23 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
info->busn.flags = IORESOURCE_BUS;
pci_add_resource(&resources, &info->busn);
- root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
- &resources);
- if (root_bus) {
- /* Setup IRQs of all devices using custom routines */
- pci_fixup_irqs(pci_common_swizzle, info->map_irq);
-
- /* Assign devices with resources */
- pci_assign_unassigned_resources();
- } else {
- pci_free_resource_list(&resources);
- }
-}
-
-void pcibios_fixup_bus(struct pci_bus *pbus)
-{
- struct pci_dev *dev;
- int i, has_io, has_mem;
- u16 cmd;
+ list_splice_init(&resources, &bridge->windows);
+ bridge->dev.parent = &ofdev->dev;
+ bridge->sysdata = info;
+ bridge->busnr = 0;
+ bridge->ops = info->ops;
+ bridge->swizzle_irq = pci_common_swizzle;
+ bridge->map_irq = info->map_irq;
- list_for_each_entry(dev, &pbus->devices, bus_list) {
- /*
- * We can not rely on that the bootloader has enabled I/O
- * or memory access to PCI devices. Instead we enable it here
- * if the device has BARs of respective type.
- */
- has_io = has_mem = 0;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO)
- has_io = 1;
- else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- /* ROM BARs are mapped into 32-bit memory space */
- if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
- dev->resource[PCI_ROM_RESOURCE].flags |=
- IORESOURCE_ROM_ENABLE;
- has_mem = 1;
- }
- pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
- pci_name(dev));
-#endif
- cmd |= PCI_COMMAND_IO;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
- "%s\n", pci_name(dev));
-#endif
- cmd |= PCI_COMMAND_MEMORY;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret) {
+ pci_free_host_bridge(bridge);
+ return;
}
-}
-
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- return res->start;
-}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- return pci_enable_resources(dev, mask);
-}
-
-/* in/out routines taken from pcic.c
- *
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 1;
- outb(*(const char *)src, addr);
- src += 1;
- /* addr += 1; */
- }
-}
-EXPORT_SYMBOL(outsb);
+ root_bus = bridge->bus;
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 2;
- outw(*(const short *)src, addr);
- src += 2;
- /* addr += 2; */
- }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 4;
- outl(*(const long *)src, addr);
- src += 4;
- /* addr += 4; */
- }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 1;
- *(unsigned char *)dst = inb(addr);
- dst += 1;
- /* addr += 1; */
- }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 2;
- *(unsigned short *)dst = inw(addr);
- dst += 2;
- /* addr += 2; */
- }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 4;
- /*
- * XXX I am sure we are in for an unaligned trap here.
- */
- *(unsigned long *)dst = inl(addr);
- dst += 4;
- /* addr += 4; */
- }
+ /* Assign devices with resources */
+ pci_assign_unassigned_resources();
+ pci_bus_add_devices(root_bus);
}
-EXPORT_SYMBOL(insl);
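leon_pci_init() above moves from pci_scan_root_bus() plus the removed pci_fixup_irqs() to an explicitly allocated pci_host_bridge whose swizzle_irq/map_irq hooks handle interrupt routing, followed by pci_bus_add_devices(). The same sequence pulled together in one sketch, assuming the caller has already filled in the resource list and leon_pci_info as the driver does:

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/leon_pci.h>

static void example_register_host(struct platform_device *ofdev,
				  struct leon_pci_info *info,
				  struct list_head *resources)
{
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return;

	list_splice_init(resources, &bridge->windows);
	bridge->dev.parent = &ofdev->dev;
	bridge->sysdata = info;
	bridge->busnr = 0;
	bridge->ops = info->ops;
	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = info->map_irq;

	if (pci_scan_root_bus_bridge(bridge)) {
		pci_free_host_bridge(bridge);
		return;
	}

	pci_assign_unassigned_resources();
	pci_bus_add_devices(bridge->bus);
}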
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 6df26e37f879..b2b639bee068 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* leon_pci_grpci1.c: GRPCI1 Host PCI driver
*
@@ -12,10 +13,11 @@
* Contributors: Daniel Hellstrom <daniel@gaisler.com>
*/
-#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -80,7 +82,7 @@ struct grpci1_regs {
struct grpci1_priv {
struct leon_pci_info info; /* must be on top of this structure */
- struct grpci1_regs *regs; /* GRPCI register map */
+ struct grpci1_regs __iomem *regs; /* GRPCI register map */
struct device *dev;
int pci_err_mask; /* STATUS register error mask */
int irq; /* LEON irqctrl GRPCI IRQ */
@@ -101,7 +103,7 @@ static struct grpci1_priv *grpci1priv;
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val);
-int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci1_priv *priv = dev->bus->sysdata;
int irq_group;
@@ -144,7 +146,7 @@ static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
} else {
/* Bus always little endian (unaffected by byte-swapping) */
- *val = flip_dword(tmp);
+ *val = swab32(tmp);
}
return 0;
@@ -197,7 +199,7 @@ static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
- LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+ LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
return 0;
}
@@ -357,7 +359,7 @@ static struct irq_chip grpci1_irq = {
};
/* Handle one or multiple IRQs from the PCI core */
-static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci1_pci_flow_irq(struct irq_desc *desc)
{
struct grpci1_priv *priv = grpci1priv;
int i, ack = 0;
@@ -417,10 +419,10 @@ out:
* BAR1: peripheral DMA to host's memory (size at least 256MByte)
* BAR2..BAR5: not implemented in hardware
*/
-void grpci1_hw_init(struct grpci1_priv *priv)
+static void grpci1_hw_init(struct grpci1_priv *priv)
{
u32 ahbadr, bar_sz, data, pciadr;
- struct grpci1_regs *regs = priv->regs;
+ struct grpci1_regs __iomem *regs = priv->regs;
/* set 1:1 mapping between AHB -> PCI memory space */
REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
@@ -509,7 +511,7 @@ static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
static int grpci1_of_probe(struct platform_device *ofdev)
{
- struct grpci1_regs *regs;
+ struct grpci1_regs __iomem *regs;
struct grpci1_priv *priv;
int err, len;
const int *tmp;
@@ -690,12 +692,12 @@ err3:
err2:
release_resource(&priv->info.mem_space);
err1:
- iounmap((void *)priv->pci_io_va);
+ iounmap((void __iomem *)priv->pci_io_va);
grpci1priv = NULL;
return err;
}
-static struct of_device_id grpci1_of_match[] = {
+static const struct of_device_id grpci1_of_match[] = {
{
.name = "GAISLER_PCIFBRG",
},
@@ -708,7 +710,6 @@ static struct of_device_id grpci1_of_match[] = {
static struct platform_driver grpci1_of_driver = {
.driver = {
.name = "grpci1",
- .owner = THIS_MODULE,
.of_match_table = grpci1_of_match,
},
.probe = grpci1_of_probe,
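Both GRPCI hunks replace the driver-local flip_dword() with the generic swab32() when accessing the little-endian PCI configuration space through big-endian bypass loads, and annotate the register maps with __iomem. A sketch of the read side only, with an illustrative address computation:

#include <linux/swab.h>
#include <linux/types.h>
#include <asm/leon.h>

static u32 example_cfg_read32(unsigned long pci_conf_base,
			      unsigned int devfn, int where)
{
	unsigned int *pci_conf = (unsigned int *)
		(pci_conf_base | (devfn << 8) | (where & 0xfc));
	u32 tmp = LEON3_BYPASS_LOAD_PA(pci_conf);

	/* Config space is little endian, the LEON CPU is big endian. */
	return swab32(tmp);
}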
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 5f0402aab7fb..9f662340b5b2 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* leon_pci_grpci2.c: GRPCI2 Host PCI driver
*
@@ -5,11 +6,14 @@
*
*/
-#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
#include <asm/io.h>
#include <asm/leon.h>
#include <asm/vaddrs.h>
@@ -190,7 +194,7 @@ struct grpci2_cap_first {
struct grpci2_priv {
struct leon_pci_info info; /* must be on top of this structure */
- struct grpci2_regs *regs;
+ struct grpci2_regs __iomem *regs;
char irq;
char irq_mode; /* IRQ Mode from CAPSTS REG */
char bt_enabled;
@@ -214,10 +218,10 @@ struct grpci2_priv {
struct grpci2_barcfg tgtbars[6];
};
-DEFINE_SPINLOCK(grpci2_dev_lock);
-struct grpci2_priv *grpci2priv;
+static DEFINE_SPINLOCK(grpci2_dev_lock);
+static struct grpci2_priv *grpci2priv;
-int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci2_priv *priv = dev->bus->sysdata;
int irq_group;
@@ -269,7 +273,7 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
*val = 0xffffffff;
} else {
/* Bus always little endian (unaffected by byte-swapping) */
- *val = flip_dword(tmp);
+ *val = swab32(tmp);
}
return 0;
@@ -327,7 +331,7 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
- LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+ LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
/* Wait until GRPCI2 signals that CFG access is done, it should be
* done instantaneously unless a DMA operation is ongoing...
@@ -497,7 +501,7 @@ static struct irq_chip grpci2_irq = {
};
/* Handle one or multiple IRQs from the PCI core */
-static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci2_pci_flow_irq(struct irq_desc *desc)
{
struct grpci2_priv *priv = grpci2priv;
int i, ack = 0;
@@ -560,10 +564,10 @@ out:
return virq;
}
-void grpci2_hw_init(struct grpci2_priv *priv)
+static void grpci2_hw_init(struct grpci2_priv *priv)
{
u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
- struct grpci2_regs *regs = priv->regs;
+ struct grpci2_regs __iomem *regs = priv->regs;
int i;
struct grpci2_barcfg *barcfg = priv->tgtbars;
@@ -582,7 +586,7 @@ void grpci2_hw_init(struct grpci2_priv *priv)
REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
/* set 1:1 mapping between AHB -> PCI memory space, for all Masters
- * Each AHB master has it's own mapping registers. Max 16 AHB masters.
+ * Each AHB master has its own mapping registers. Max 16 AHB masters.
*/
for (i = 0; i < 16; i++)
REGSTORE(regs->ahbmst_map[i], priv->pci_area);
@@ -654,7 +658,7 @@ static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
{
struct grpci2_priv *priv = arg;
- struct grpci2_regs *regs = priv->regs;
+ struct grpci2_regs __iomem *regs = priv->regs;
unsigned int status;
status = REGLOAD(regs->sts_cap);
@@ -681,7 +685,7 @@ static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
static int grpci2_of_probe(struct platform_device *ofdev)
{
- struct grpci2_regs *regs;
+ struct grpci2_regs __iomem *regs;
struct grpci2_priv *priv;
int err, i, len;
const int *tmp;
@@ -722,7 +726,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
err = -ENOMEM;
goto err1;
}
- memset(grpci2priv, 0, sizeof(*grpci2priv));
priv->regs = regs;
priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
@@ -877,7 +880,7 @@ err4:
release_resource(&priv->info.mem_space);
err3:
err = -ENOMEM;
- iounmap((void *)priv->pci_io_va);
+ iounmap((void __iomem *)priv->pci_io_va);
err2:
kfree(priv);
err1:
@@ -886,7 +889,7 @@ err1:
return err;
}
-static struct of_device_id grpci2_of_match[] = {
+static const struct of_device_id grpci2_of_match[] = {
{
.name = "GAISLER_GRPCI2",
},
@@ -899,7 +902,6 @@ static struct of_device_id grpci2_of_match[] = {
static struct platform_driver grpci2_of_driver = {
.driver = {
.name = "grpci2",
- .owner = THIS_MODULE,
.of_match_table = grpci2_of_match,
},
.probe = grpci2_of_probe,
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index b0b3967a2dd2..6c00cbad7fb5 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* leon_pmc.c: LEON Power-down cpu_idle() handler
*
* Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
@@ -12,14 +13,14 @@
#include <asm/processor.h>
/* List of Systems that need fixup instructions around power-down instruction */
-unsigned int pmc_leon_fixup_ids[] = {
+static unsigned int pmc_leon_fixup_ids[] = {
AEROFLEX_UT699,
GAISLER_GR712RC,
LEON4_NEXTREME1,
0
};
-int pmc_leon_need_fixup(void)
+static int pmc_leon_need_fixup(void)
{
unsigned int systemid = amba_system_id >> 16;
unsigned int *id;
@@ -38,7 +39,7 @@ int pmc_leon_need_fixup(void)
* CPU idle callback function for systems that need some extra handling
* See .../arch/sparc/kernel/process.c
*/
-void pmc_leon_idle_fixup(void)
+static void pmc_leon_idle_fixup(void)
{
/* Prepare an address to a non-cachable region. APB is always
* none-cachable. One instruction is executed after the Sleep
@@ -49,26 +50,30 @@ void pmc_leon_idle_fixup(void)
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
__asm__ __volatile__ (
"wr %%g0, %%asr19\n"
"lda [%0] %1, %%g0\n"
:
: "r"(address), "i"(ASI_LEON_BYPASS));
+
+ raw_local_irq_disable();
}
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
-void pmc_leon_idle(void)
+static void pmc_leon_idle(void)
{
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
+
+ raw_local_irq_disable();
}
/* Install LEON Power Down function */
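leon_pmc.c switches the idle callbacks to the raw_local_irq_* variants and explicitly disables interrupts again after the power-down instruction, in line with the generic idle loop now expecting these callbacks to be entered and left with interrupts disabled. The resulting callback shape, with the asm reproduced from the driver and an illustrative function name:

#include <linux/irqflags.h>

static void example_leon_idle(void)
{
	/* Interrupts must be enabled while sleeping or the CPU never wakes. */
	raw_local_irq_enable();

	/* Power-down request; a no-op on parts without the feature. */
	__asm__ __volatile__ ("wr %g0, %asr19\n\t");

	raw_local_irq_disable();
}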
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index d7aa524b7283..1ee393abc463 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* leon_smp.c: Sparc-Leon SMP support.
*
* based on sun4m_smp.c
@@ -9,7 +10,7 @@
#include <asm/head.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
@@ -54,7 +53,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
-void __cpuinit leon_configure_cache_smp(void);
+void leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
@@ -69,12 +68,12 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
return val;
}
-void __cpuinit leon_cpu_pre_starting(void *arg)
+void leon_cpu_pre_starting(void *arg)
{
leon_configure_cache_smp();
}
-void __cpuinit leon_cpu_pre_online(void *arg)
+void leon_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -93,7 +92,7 @@ void __cpuinit leon_cpu_pre_online(void *arg)
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
@@ -106,7 +105,7 @@ void __cpuinit leon_cpu_pre_online(void *arg)
extern struct linux_prom_registers smp_penguin_ctable;
-void __cpuinit leon_configure_cache_smp(void)
+void leon_configure_cache_smp(void)
{
unsigned long cfg = sparc_leon3_get_dcachecfg();
int me = smp_processor_id();
@@ -130,7 +129,7 @@ void __cpuinit leon_configure_cache_smp(void)
local_ops->tlb_all();
}
-void leon_smp_setbroadcast(unsigned int mask)
+static void leon_smp_setbroadcast(unsigned int mask)
{
int broadcast =
((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
@@ -148,13 +147,6 @@ void leon_smp_setbroadcast(unsigned int mask)
LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
}
-unsigned int leon_smp_getbroadcast(void)
-{
- unsigned int mask;
- mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
- return mask;
-}
-
int leon_smp_nrcpus(void)
{
int nrcpu =
@@ -186,7 +178,7 @@ void __init leon_boot_cpus(void)
}
-int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle)
+int leon_boot_one_cpu(int i, struct task_struct *idle)
{
int timeout;
@@ -253,23 +245,19 @@ void __init leon_smp_done(void)
/* Free unneeded trap tables */
if (!cpu_present(1)) {
- free_reserved_page(virt_to_page(&trapbase_cpu1));
+ free_reserved_page(virt_to_page(&trapbase_cpu1[0]));
}
if (!cpu_present(2)) {
- free_reserved_page(virt_to_page(&trapbase_cpu2));
+ free_reserved_page(virt_to_page(&trapbase_cpu2[0]));
}
if (!cpu_present(3)) {
- free_reserved_page(virt_to_page(&trapbase_cpu3));
+ free_reserved_page(virt_to_page(&trapbase_cpu3[0]));
}
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
}
-void leon_irq_rotate(int cpu)
-{
-}
-
struct leon_ipi_work {
int single;
int msk;
@@ -354,7 +342,7 @@ static void leon_ipi_resched(int cpu)
void leonsmp_ipi_interrupt(void)
{
- struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
+ struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
if (work->single) {
work->single = 0;
@@ -371,7 +359,7 @@ void leonsmp_ipi_interrupt(void)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -379,12 +367,12 @@ static struct smp_funcall {
unsigned long arg5;
unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
-} ccall_info;
+} ccall_info __attribute__((aligned(8)));
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -396,7 +384,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
/* If you make changes here, make sure gcc generates proper code... */
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -456,11 +444,13 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void leon_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
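The leon_smp.c hunks modernize two idioms: taking a reference on init_mm with mmgrab() instead of bumping mm_count directly, and fetching this CPU's IPI work slot with this_cpu_ptr() instead of __get_cpu_var(). A sketch of both outside the driver, with illustrative names:

#include <linux/mm_types.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

struct example_ipi_work {
	int single;
	int msk;
	int resched;
};
static DEFINE_PER_CPU(struct example_ipi_work, example_ipi_work);

static void example_cpu_pre_online(void)
{
	/* Pin init_mm while this CPU borrows it as its active_mm. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
}

static void example_ipi_interrupt(void)
{
	struct example_ipi_work *work = this_cpu_ptr(&example_ipi_work);

	if (work->single) {
		work->single = 0;
		/* ... handle the single-target IPI ... */
	}
}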
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 831c001604e8..30f171b7b00c 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -1,25 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
/* mdesc.c: Sun4V machine description handling.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
+#include <linux/refcount.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>
+#include <asm/adi.h>
/* Unlike the OBP device tree, the machine description is a full-on
* DAG. An arbitrary number of ARCs are possible from one
@@ -37,6 +39,7 @@ struct mdesc_hdr {
u32 node_sz; /* node block size */
u32 name_sz; /* name block size */
u32 data_sz; /* data block size */
+ char data[];
} __attribute__((aligned(16)));
struct mdesc_elem {
@@ -70,11 +73,79 @@ struct mdesc_handle {
struct list_head list;
struct mdesc_mem_ops *mops;
void *self_base;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int handle_size;
struct mdesc_hdr mdesc;
};
+typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
+ union md_node_info *);
+typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
+typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);
+
+struct md_node_ops {
+ char *name;
+ mdesc_node_info_get_f get_info;
+ mdesc_node_info_rel_f rel_info;
+ mdesc_node_match_f node_match;
+};
+
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_vdev_port_node_info(union md_node_info *node_info);
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_ds_port_node_info(union md_node_info *node_info);
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+/* supported node types which can be registered */
+static struct md_node_ops md_node_ops_table[] = {
+ {"virtual-device-port", get_vdev_port_node_info,
+ rel_vdev_port_node_info, vdev_port_node_match},
+ {"domain-services-port", get_ds_port_node_info,
+ rel_ds_port_node_info, ds_port_node_match},
+ {NULL, NULL, NULL, NULL}
+};
+
+static void mdesc_get_node_ops(const char *node_name,
+ mdesc_node_info_get_f *get_info_f,
+ mdesc_node_info_rel_f *rel_info_f,
+ mdesc_node_match_f *match_f)
+{
+ int i;
+
+ if (get_info_f)
+ *get_info_f = NULL;
+
+ if (rel_info_f)
+ *rel_info_f = NULL;
+
+ if (match_f)
+ *match_f = NULL;
+
+ if (!node_name)
+ return;
+
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
+ if (get_info_f)
+ *get_info_f = md_node_ops_table[i].get_info;
+
+ if (rel_info_f)
+ *rel_info_f = md_node_ops_table[i].rel_info;
+
+ if (match_f)
+ *match_f = md_node_ops_table[i].node_match;
+
+ break;
+ }
+ }
+}
+
static void mdesc_handle_init(struct mdesc_handle *hp,
unsigned int handle_size,
void *base)
@@ -84,7 +155,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
- atomic_set(&hp->refcnt, 1);
+ refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
}
@@ -99,7 +170,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
mdesc_size);
alloc_size = PAGE_ALIGN(handle_size);
- paddr = memblock_alloc(alloc_size, PAGE_SIZE);
+ paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);
hp = NULL;
if (paddr) {
@@ -114,12 +185,12 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
unsigned int alloc_size;
unsigned long start;
- BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
- free_bootmem_late(start, alloc_size);
+ memblock_free_late(start, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {
@@ -130,31 +201,29 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
+ struct mdesc_handle *hp;
+ unsigned long addr;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!base)
+ return NULL;
- base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
- if (base) {
- struct mdesc_handle *hp;
- unsigned long addr;
-
- addr = (unsigned long)base;
- addr = (addr + 15UL) & ~15UL;
- hp = (struct mdesc_handle *) addr;
+ addr = (unsigned long)base;
+ addr = (addr + 15UL) & ~15UL;
+ hp = (struct mdesc_handle *) addr;
- mdesc_handle_init(hp, handle_size, base);
- return hp;
- }
+ mdesc_handle_init(hp, handle_size, base);
- return NULL;
+ return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
{
- BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base);
@@ -193,7 +262,7 @@ struct mdesc_handle *mdesc_grab(void)
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
- atomic_inc(&hp->refcnt);
+ refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
return hp;
@@ -205,7 +274,7 @@ void mdesc_release(struct mdesc_handle *hp)
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
- if (atomic_dec_and_test(&hp->refcnt)) {
+ if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
@@ -218,14 +287,31 @@ static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
+ bool supported = false;
u64 node;
+ int i;
mutex_lock(&mdesc_mutex);
+
+ /* check to see if the node is supported for registration */
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
+ supported = true;
+ break;
+ }
+ }
+
+ if (!supported) {
+ pr_err("MD: %s node not supported\n", client->node_name);
+ mutex_unlock(&mdesc_mutex);
+ return;
+ }
+
client->next = client_list;
client_list = client;
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
- client->add(cur_mdesc, node);
+ client->add(cur_mdesc, node, client->node_name);
mutex_unlock(&mdesc_mutex);
}
@@ -249,59 +335,147 @@ static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
return id;
}
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *parent_cfg_hdlp;
+ const char *name;
+ const u64 *idp;
+
+ /*
+ * Virtual device nodes are distinguished by:
+ * 1. "id" property
+ * 2. "name" property
+ * 3. parent node "cfg-handle" property
+ */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ name = mdesc_get_property(md, node, "name", NULL);
+ parent_cfg_hdlp = parent_cfg_handle(md, node);
+
+ if (!idp || !name || !parent_cfg_hdlp)
+ return -1;
+
+ node_info->vdev_port.id = *idp;
+ node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
+ if (!node_info->vdev_port.name)
+ return -1;
+ node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
+
+ return 0;
+}
+
+static void rel_vdev_port_node_info(union md_node_info *node_info)
+{
+ if (node_info && node_info->vdev_port.name) {
+ kfree_const(node_info->vdev_port.name);
+ node_info->vdev_port.name = NULL;
+ }
+}
+
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
+ return false;
+
+ if (a_node_info->vdev_port.parent_cfg_hdl !=
+ b_node_info->vdev_port.parent_cfg_hdl)
+ return false;
+
+ if (strncmp(a_node_info->vdev_port.name,
+ b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
+ return false;
+
+ return true;
+}
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *idp;
+
+ /* DS port nodes use the "id" property to distinguish them */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ if (!idp)
+ return -1;
+
+ node_info->ds_port.id = *idp;
+
+ return 0;
+}
+
+static void rel_ds_port_node_info(union md_node_info *node_info)
+{
+}
+
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->ds_port.id != b_node_info->ds_port.id)
+ return false;
+
+ return true;
+}
+
/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
struct mdesc_handle *a,
struct mdesc_handle *b,
- void (*func)(struct mdesc_handle *, u64))
+ void (*func)(struct mdesc_handle *, u64,
+ const char *node_name))
{
- u64 node;
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info a_node_info;
+ union md_node_info b_node_info;
+ bool found;
+ u64 a_node;
+ u64 b_node;
+ int rv;
+
+ /*
+ * Find the get_info, rel_info and node_match ops for the given
+ * node name
+ */
+ mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
+ &node_match_func);
- mdesc_for_each_node_by_name(a, node, name) {
- int found = 0, is_vdc_port = 0;
- const char *name_prop;
- const u64 *id;
- u64 fnode;
-
- name_prop = mdesc_get_property(a, node, "name", NULL);
- if (name_prop && !strcmp(name_prop, "vdc-port")) {
- is_vdc_port = 1;
- id = parent_cfg_handle(a, node);
- } else
- id = mdesc_get_property(a, node, "id", NULL);
-
- if (!id) {
- printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
- (name_prop ? name_prop : name));
+ /* If we didn't find a match, the node type is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node type is not supported\n", name);
+ return;
+ }
+
+ mdesc_for_each_node_by_name(a, a_node, name) {
+ found = false;
+
+ rv = get_info_func(a, a_node, &a_node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ name);
continue;
}
- mdesc_for_each_node_by_name(b, fnode, name) {
- const u64 *fid;
-
- if (is_vdc_port) {
- name_prop = mdesc_get_property(b, fnode,
- "name", NULL);
- if (!name_prop ||
- strcmp(name_prop, "vdc-port"))
- continue;
- fid = parent_cfg_handle(b, fnode);
- if (!fid) {
- printk(KERN_ERR "MD: Cannot find ID "
- "for vdc-port node.\n");
- continue;
- }
- } else
- fid = mdesc_get_property(b, fnode,
- "id", NULL);
-
- if (*id == *fid) {
- found = 1;
+ /* Check each node in B for node matching a_node */
+ mdesc_for_each_node_by_name(b, b_node, name) {
+ rv = get_info_func(b, b_node, &b_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(&a_node_info, &b_node_info)) {
+ found = true;
+ rel_info_func(&b_node_info);
break;
}
+
+ rel_info_func(&b_node_info);
}
+
+ rel_info_func(&a_node_info);
+
if (!found)
- func(a, node);
+ func(a, a_node, name);
}
}
@@ -344,7 +518,7 @@ void mdesc_update(void)
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status);
- atomic_dec(&hp->refcnt);
+ refcount_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
@@ -357,7 +531,7 @@ void mdesc_update(void)
mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags);
- if (atomic_dec_and_test(&orig_hp->refcnt))
+ if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
@@ -367,9 +541,79 @@ out:
mutex_unlock(&mdesc_mutex);
}
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+ union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info hp_node_info;
+ u64 hp_node;
+ int rv;
+
+ if (hp == NULL || node_name == NULL || node_info == NULL)
+ return MDESC_NODE_NULL;
+
+ /* Find the ops for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
+ &node_match_func);
+
+ /* If we didn't find ops for the given node name, it is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ mdesc_for_each_node_by_name(hp, hp_node, node_name) {
+ rv = get_info_func(hp, hp_node, &hp_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(node_info, &hp_node_info))
+ break;
+
+ rel_info_func(&hp_node_info);
+ }
+
+ rel_info_func(&hp_node_info);
+
+ return hp_node;
+}
+EXPORT_SYMBOL(mdesc_get_node);
+
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+ const char *node_name, union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ int rv;
+
+ if (hp == NULL || node == MDESC_NODE_NULL ||
+ node_name == NULL || node_info == NULL)
+ return -EINVAL;
+
+ /* Find the get_info op for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);
+
+ /* If we didn't find a get_info_func, the node name is not supported */
+ if (get_info_func == NULL) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ rv = get_info_func(hp, node, node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ node_name);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mdesc_get_node_info);
+
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
- return (struct mdesc_elem *) (mdesc + 1);
+ return (struct mdesc_elem *) mdesc->data;
}
static void *name_block(struct mdesc_hdr *mdesc)
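The two helpers exported above let a client re-locate a node after a machine-description update: mdesc_get_node_info() records the distinguishing properties of a node into a union md_node_info, and mdesc_get_node() finds the matching node in another handle. A sketch of that round trip for a virtual-device-port node; error handling is minimal, and note that the vdev-port variant holds a kstrdup_const()'d name owned by the caller:

#include <linux/types.h>
#include <asm/mdesc.h>

static u64 example_refind_port(struct mdesc_handle *old_hp, u64 old_node,
			       struct mdesc_handle *new_hp)
{
	union md_node_info info;

	if (mdesc_get_node_info(old_hp, old_node,
				"virtual-device-port", &info))
		return MDESC_NODE_NULL;

	/* Returns MDESC_NODE_NULL when no node in new_hp matches. */
	return mdesc_get_node(new_hp, "virtual-device-port", &info);
}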
@@ -571,9 +815,7 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
-static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
- struct mdesc_handle *hp,
- u64 mp)
+static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
@@ -616,45 +858,76 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
}
}
-static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+ char *srch_val,
+ void (*func)(struct mdesc_handle *, u64, int),
+ u64 val, int depth)
{
- u64 a;
+ u64 arc;
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
- u64 t = mdesc_arc_target(hp, a);
- const char *name;
- const u64 *id;
+ /* Since we have an estimate of recursion depth, do a sanity check. */
+ if (depth == 0)
+ return;
- name = mdesc_node_name(hp, t);
- if (!strcmp(name, "cpu")) {
- id = mdesc_get_property(hp, t, "id", NULL);
- if (*id < NR_CPUS)
- cpu_data(*id).core_id = core_id;
- } else {
- u64 j;
+ mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 n = mdesc_arc_target(hp, arc);
+ const char *name = mdesc_node_name(hp, n);
- mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
- u64 n = mdesc_arc_target(hp, j);
- const char *n_name;
+ if (!strcmp(srch_val, name))
+ (*func)(hp, n, val);
- n_name = mdesc_node_name(hp, n);
- if (strcmp(n_name, "cpu"))
- continue;
+ find_back_node_value(hp, n, srch_val, func, val, depth-1);
+ }
+}
- id = mdesc_get_property(hp, n, "id", NULL);
- if (*id < NR_CPUS)
- cpu_data(*id).core_id = core_id;
- }
- }
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+ int core_id)
+{
+ const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+ if (*id < num_possible_cpus())
+ cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+ int max_cache_id)
+{
+ const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+ if (*id < num_possible_cpus()) {
+ cpu_data(*id).max_cache_id = max_cache_id;
+
+ /**
+	 * On systems without explicit socket descriptions, socket
+ * is max_cache_id
+ */
+ cpu_data(*id).sock_id = max_cache_id;
}
}
-static void __cpuinit set_core_ids(struct mdesc_handle *hp)
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+ int core_id)
+{
+ find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+ int max_cache_id)
+{
+ find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+ max_cache_id, 10);
+}
+
+static void set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
idx = 1;
+
+ /* Identify unique cores by looking for cpus backpointed to by
+ * level 1 instruction caches.
+ */
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *level;
const char *type;
@@ -669,12 +942,75 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp)
continue;
mark_core_ids(hp, mp, idx);
+ idx++;
+ }
+}
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+ u64 mp;
+ int idx = 1;
+ int fnd = 0;
+
+ /**
+ * Identify unique highest level of shared cache by looking for cpus
+ * backpointed to by shared level N caches.
+ */
+ mdesc_for_each_node_by_name(hp, mp, "cache") {
+ const u64 *cur_lvl;
+
+ cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+ if (*cur_lvl != level)
+ continue;
+ mark_max_cache_ids(hp, mp, idx);
idx++;
+ fnd = 1;
}
+ return fnd;
}
-static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+ int idx = 1;
+
+ mdesc_for_each_node_by_name(hp, mp, "socket") {
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 t = mdesc_arc_target(hp, a);
+ const char *name;
+ const u64 *id;
+
+ name = mdesc_node_name(hp, t);
+ if (strcmp(name, "cpu"))
+ continue;
+
+ id = mdesc_get_property(hp, t, "id", NULL);
+ if (*id < num_possible_cpus())
+ cpu_data(*id).sock_id = idx;
+ }
+ idx++;
+ }
+}
+
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+ u64 mp;
+
+ /**
+	 * Find the highest level of shared cache which, pre-T7, is also
+ * the socket.
+ */
+ if (!set_max_cache_ids_by_cache(hp, 3))
+ set_max_cache_ids_by_cache(hp, 2);
+
+	/* If the machine description exposes sockets data, use it. */
+ mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+ if (mp != MDESC_NODE_NULL)
+ set_sock_ids_by_socket(hp, mp);
+}
+
+static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
@@ -693,7 +1029,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id
}
}
-static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
+static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
@@ -709,19 +1045,18 @@ static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_u
continue;
mark_proc_ids(hp, mp, idx);
-
idx++;
}
}
-static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
+static void set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
-static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
- unsigned long def, unsigned long max)
+static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
+ unsigned long def, unsigned long max)
{
u64 val;
@@ -742,8 +1077,8 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
-static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
- struct trap_per_cpu *tb)
+static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
+ struct trap_per_cpu *tb)
{
static int printed;
const u64 *val;
@@ -769,7 +1104,7 @@ static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
}
}
-static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
+static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
void *ret = NULL;
@@ -799,7 +1134,8 @@ out:
return ret;
}
-static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
+ void *arg)
{
ncpus_probed++;
#ifdef CONFIG_SMP
@@ -808,7 +1144,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui
return NULL;
}
-void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+void mdesc_populate_present_mask(cpumask_t *mask)
{
if (tlb_type != hypervisor)
return;
@@ -841,7 +1177,8 @@ void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}
-static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
+ void *arg)
{
const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
struct trap_per_cpu *tb;
@@ -890,49 +1227,86 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu
return NULL;
}
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
struct mdesc_handle *hp;
mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
-#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
-#endif
-
hp = mdesc_grab();
set_core_ids(hp);
set_proc_ids(hp);
+ set_sock_ids(hp);
mdesc_release(hp);
smp_fill_in_sib_core_maps();
}
-static ssize_t mdesc_read(struct file *file, char __user *buf,
- size_t len, loff_t *offp)
+/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
+ * opened. Hold this reference until /dev/mdesc is closed to ensure
+ * mdesc data structure is not released underneath us. Store the
+ * pointer to the mdesc structure in private_data for read and seek to use.
+ */
+static int mdesc_open(struct inode *inode, struct file *file)
{
struct mdesc_handle *hp = mdesc_grab();
- int err;
if (!hp)
return -ENODEV;
- err = hp->handle_size;
- if (len < hp->handle_size)
- err = -EMSGSIZE;
- else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
- err = -EFAULT;
- mdesc_release(hp);
+ file->private_data = hp;
+
+ return 0;
+}
+
+static ssize_t mdesc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offp)
+{
+ struct mdesc_handle *hp = file->private_data;
+ unsigned char *mdesc;
+ int bytes_left, count = len;
+
+ if (*offp >= hp->handle_size)
+ return 0;
+
+ bytes_left = hp->handle_size - *offp;
+ if (count > bytes_left)
+ count = bytes_left;
+
+ mdesc = (unsigned char *)&hp->mdesc;
+ mdesc += *offp;
+ if (!copy_to_user(buf, mdesc, count)) {
+ *offp += count;
+ return count;
+ } else {
+ return -EFAULT;
+ }
+}
- return err;
+static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct mdesc_handle *hp = file->private_data;
+
+ return no_seek_end_llseek_size(file, offset, whence, hp->handle_size);
+}
+
+/* mdesc_close() - /dev/mdesc is being closed, release the reference to
+ * mdesc structure.
+ */
+static int mdesc_close(struct inode *inode, struct file *file)
+{
+ mdesc_release(file->private_data);
+ return 0;
}
static const struct file_operations mdesc_fops = {
- .read = mdesc_read,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
+ .open = mdesc_open,
+ .read = mdesc_read,
+ .llseek = mdesc_llseek,
+ .release = mdesc_close,
+ .owner = THIS_MODULE,
};
static struct miscdevice mdesc_misc = {
@@ -974,5 +1348,6 @@ void __init sun4v_mdesc_init(void)
cur_mdesc = hp;
+ mdesc_adi_init();
report_platform_properties();
}
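Editor's note on the node-matching API added to mdesc.c above: a minimal, hypothetical usage sketch. The relocate_node() helper and its calling context are invented for illustration; only mdesc_get_node_info(), mdesc_get_node(), mdesc_grab() and mdesc_release() come from this patch, and any per-type release of node_info internals is omitted.

/* Hypothetical helper: capture the match properties of a node from an
 * older MD handle, then look the same logical node up again in the
 * current MD after an update.
 */
static u64 relocate_node(struct mdesc_handle *old_hp, u64 old_node,
			 const char *name)
{
	union md_node_info info;
	struct mdesc_handle *hp;
	u64 new_node = MDESC_NODE_NULL;

	/* Fill info with the properties used for matching. */
	if (mdesc_get_node_info(old_hp, old_node, name, &info))
		return MDESC_NODE_NULL;

	/* Search the current MD for the node with matching properties. */
	hp = mdesc_grab();
	if (hp) {
		new_node = mdesc_get_node(hp, name, &info);
		mdesc_release(hp);
	}
	return new_node;
}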
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 753b4f031bfb..b5c84177521e 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_KGDB
.globl arch_kgdb_breakpoint
.type arch_kgdb_breakpoint,#function
@@ -18,8 +19,7 @@ __do_privact:
109: or %g7, %lo(109b), %g7
call do_privact
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __do_privact,.-__do_privact
.type do_mna,#function
@@ -46,8 +46,7 @@ do_mna:
mov %l5, %o2
call mem_address_unaligned
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_mna,.-do_mna
.type do_lddfmna,#function
@@ -65,8 +64,7 @@ do_lddfmna:
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_lddfmna,.-do_lddfmna
.type do_stdfmna,#function
@@ -84,7 +82,7 @@ do_stdfmna:
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
+ ba,a,pt %xcc, rtrap
nop
.size do_stdfmna,.-do_stdfmna
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 4435488ebe25..49740450a685 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Kernel module help for sparc64.
*
* Copyright (C) 2001 Rusty Russell.
@@ -20,36 +21,6 @@
#include "entry.h"
-#ifdef CONFIG_SPARC64
-
-#include <linux/jump_label.h>
-
-static void *module_map(unsigned long size)
-{
- if (PAGE_ALIGN(size) > MODULES_LEN)
- return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, -1,
- __builtin_return_address(0));
-}
-#else
-static void *module_map(unsigned long size)
-{
- return vmalloc(size);
-}
-#endif /* CONFIG_SPARC64 */
-
-void *module_alloc(unsigned long size)
-{
- void *ret;
-
- ret = module_map(size);
- if (ret)
- memset(ret, 0, size);
-
- return ret;
-}
-
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -116,6 +87,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
#ifdef CONFIG_SPARC64
case R_SPARC_64:
+ case R_SPARC_UA64:
location[0] = v >> 56;
location[1] = v >> 48;
location[2] = v >> 40;
@@ -170,7 +142,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
default:
- printk(KERN_ERR "module %s: Unknown relocation: %x\n",
+ printk(KERN_ERR "module %s: Unknown relocation: 0x%x\n",
me->name,
(int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
return -ENOEXEC;
@@ -207,9 +179,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- /* make jump label nops */
- jump_label_apply_nops(me);
-
do_patch_sections(hdr, sechdrs);
/* Cheetah's I-cache is fully coherent. */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 6479256fd5a4..149adc094753 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Pseudo NMI support on sparc64 systems.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
@@ -42,7 +43,7 @@ static int panic_on_timeout;
*/
atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
-
+static int nmi_init_done;
static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
@@ -51,7 +52,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
-void touch_nmi_watchdog(void)
+void arch_touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
@@ -61,34 +62,26 @@ void touch_nmi_watchdog(void)
per_cpu(nmi_touch, cpu) = 1;
}
}
+}
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
- touch_softlockup_watchdog();
+int __init watchdog_hardlockup_probe(void)
+{
+ return 0;
}
-EXPORT_SYMBOL(touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
+ int this_cpu = smp_processor_id();
+
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
return;
- console_verbose();
- bust_spinlocks(1);
-
- printk(KERN_EMERG "%s", str);
- printk(" on CPU%d, ip %08lx, registers:\n",
- smp_processor_id(), regs->tpc);
- show_regs(regs);
- dump_stack();
-
- bust_spinlocks(0);
-
if (do_panic || panic_on_oops)
- panic("Non maskable interrupt");
-
- nmi_exit();
- local_irq_enable();
- do_exit(SIGBUS);
+ panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ else
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
@@ -111,20 +104,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
sum = local_cpu_data().irq0_irqs;
- if (__get_cpu_var(nmi_touch)) {
- __get_cpu_var(nmi_touch) = 0;
+ if (__this_cpu_read(nmi_touch)) {
+ __this_cpu_write(nmi_touch, 0);
touched = 1;
}
- if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+ if (!touched && __this_cpu_read(last_irq_sum) == sum) {
__this_cpu_inc(alert_counter);
if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
- __get_cpu_var(last_irq_sum) = sum;
+ __this_cpu_write(last_irq_sum, sum);
__this_cpu_write(alert_counter, 0);
}
- if (__get_cpu_var(wd_enabled)) {
+ if (__this_cpu_read(wd_enabled)) {
pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
@@ -141,7 +134,6 @@ static inline unsigned int get_nmi_count(int cpu)
static __init void nmi_cpu_busy(void *data)
{
- local_irq_enable_in_hardirq();
while (endflag == 0)
mb();
}
@@ -165,8 +157,10 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
void stop_nmi_watchdog(void *unused)
{
+ if (!__this_cpu_read(wd_enabled))
+ return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
- __get_cpu_var(wd_enabled) = 0;
+ __this_cpu_write(wd_enabled, 0);
atomic_dec(&nmi_active);
}
@@ -178,7 +172,8 @@ static int __init check_nmi_watchdog(void)
if (!atomic_read(&nmi_active))
return 0;
- prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
+ prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
+ GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
goto error;
@@ -219,7 +214,10 @@ error:
void start_nmi_watchdog(void *unused)
{
- __get_cpu_var(wd_enabled) = 1;
+ if (__this_cpu_read(wd_enabled))
+ return;
+
+ __this_cpu_write(wd_enabled, 1);
atomic_inc(&nmi_active);
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -230,7 +228,7 @@ void start_nmi_watchdog(void *unused)
static void nmi_adjust_hz_one(void *unused)
{
- if (!__get_cpu_var(wd_enabled))
+ if (!__this_cpu_read(wd_enabled))
return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -271,6 +269,8 @@ int __init nmi_init(void)
}
}
+ nmi_init_done = 1;
+
return err;
}
@@ -279,6 +279,39 @@ static int __init setup_nmi_watchdog(char *str)
if (!strncmp(str, "panic", 5))
panic_on_timeout = 1;
- return 0;
+ return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
+
+/*
+ * sparc specific NMI watchdog enable function.
+ * Enables watchdog if it is not enabled already.
+ */
+void watchdog_hardlockup_enable(unsigned int cpu)
+{
+ if (atomic_read(&nmi_active) == -1) {
+ pr_warn("NMI watchdog cannot be enabled or disabled\n");
+ return;
+ }
+
+ /*
+ * watchdog thread could start even before nmi_init is called.
+	 * Just return in that case. Let nmi_init finish the init
+ * process first.
+ */
+ if (!nmi_init_done)
+ return;
+
+ smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
+}
+/*
+ * sparc specific NMI watchdog disable function.
+ * Disables watchdog if it is not disabled already.
+ */
+void watchdog_hardlockup_disable(unsigned int cpu)
+{
+ if (atomic_read(&nmi_active) == -1)
+ pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
+ else
+ smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
+}
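For orientation, the two hooks above are the per-CPU entry points the generic hardlockup detector is expected to call; the loop below is an invented illustration of that calling pattern, not code from this patch. Note also that with the default nmi_hz == HZ, the check in perfctr_irq() trips after 30 * nmi_hz consecutive NMIs with no irq0_irqs progress, i.e. roughly a 30 second detection window.

/* Illustrative only: mirrors how a caller could enable or disable the
 * sparc NMI watchdog on every online CPU through the new hooks.
 */
static void toggle_sparc_nmi_watchdog(bool enable)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (enable)
			watchdog_hardlockup_enable(cpu);
		else
			watchdog_hardlockup_disable(cpu);
	}
}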
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 185aa96fa5be..284a4cafa432 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -6,8 +7,9 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
@@ -20,14 +22,14 @@
static int of_bus_pci_match(struct device_node *np)
{
- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+ if (of_node_is_type(np, "pci") || of_node_is_type(np, "pciex")) {
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
- if (!of_find_property(np, "ranges", NULL))
+ if (!of_property_present(np, "ranges"))
return 0;
return 1;
@@ -105,7 +107,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
static int of_bus_ambapp_match(struct device_node *np)
{
- return !strcmp(np->type, "ambapp");
+ return of_node_is_type(np, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
@@ -221,7 +223,7 @@ static int __init build_one_resource(struct device_node *parent,
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
- if (of_find_property(pp, "ranges", NULL) != NULL)
+ if (of_property_present(pp, "ranges"))
return 0;
/* Some SBUS devices use intermediate nodes to express
@@ -230,10 +232,10 @@ static int __init use_1to1_mapping(struct device_node *pp)
* But, we should still pass the translation work up
* to the SBUS itself.
*/
- if (!strcmp(pp->name, "dma") ||
- !strcmp(pp->name, "espdma") ||
- !strcmp(pp->name, "ledma") ||
- !strcmp(pp->name, "lebuffer"))
+ if (of_node_name_eq(pp, "dma") ||
+ of_node_name_eq(pp, "espdma") ||
+ of_node_name_eq(pp, "ledma") ||
+ of_node_name_eq(pp, "lebuffer"))
return 0;
return 1;
@@ -322,8 +324,8 @@ static void __init build_device_resources(struct platform_device *op,
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
- printk("%s reg[%d] -> %llx\n",
- op->dev.of_node->full_name, index,
+ printk("%pOF reg[%d] -> %llx\n",
+ op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
@@ -331,7 +333,7 @@ static void __init build_device_resources(struct platform_device *op,
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
}
- r->name = op->dev.of_node->name;
+ r->name = op->dev.of_node->full_name;
}
}
@@ -380,9 +382,12 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
else
dev_set_name(&op->dev, "%08x", dp->phandle);
+ op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
if (of_device_register(op)) {
- printk("%s: Could not register of device.\n",
- dp->full_name);
+ printk("%pOF: Could not register of device.\n", dp);
+ put_device(&op->dev);
kfree(op);
op = NULL;
}
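The new coherent_dma_mask/dma_mask defaults above matter because the generic DMA mask helpers fail when dev->dma_mask is a NULL pointer. A hedged sketch of the driver-side effect; the probe function is hypothetical, only the dma_set_mask_and_coherent() pattern is the point.

/* Hypothetical platform driver probe: with op->dev.dma_mask wired up by
 * scan_one_device(), the standard mask helpers now succeed.
 */
static int example_probe(struct platform_device *op)
{
	/* Accept the 32-bit default installed by the bus scan code. */
	if (dma_set_mask_and_coherent(&op->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}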
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 7bbdc26d9512..f53092b07b9e 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -1,14 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/of.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/spitfire.h>
#include "of_device_common.h"
@@ -44,7 +46,7 @@ EXPORT_SYMBOL(of_iounmap);
static int of_bus_pci_match(struct device_node *np)
{
- if (!strcmp(np->name, "pci")) {
+ if (of_node_name_eq(np, "pci")) {
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
@@ -56,7 +58,7 @@ static int of_bus_pci_match(struct device_node *np)
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
- if (!of_find_property(np, "ranges", NULL))
+ if (!of_property_present(np, "ranges"))
return 0;
return 1;
@@ -75,8 +77,8 @@ static int of_bus_simba_match(struct device_node *np)
/* Treat PCI busses lacking ranges property just like
* simba.
*/
- if (!strcmp(np->name, "pci")) {
- if (!of_find_property(np, "ranges", NULL))
+ if (of_node_name_eq(np, "pci")) {
+ if (!of_property_present(np, "ranges"))
return 1;
}
@@ -168,8 +170,8 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
*/
static int of_bus_fhc_match(struct device_node *np)
{
- return !strcmp(np->name, "fhc") ||
- !strcmp(np->name, "central");
+ return of_node_name_eq(np, "fhc") ||
+ of_node_name_eq(np, "central");
}
#define of_bus_fhc_count_cells of_bus_sbus_count_cells
@@ -281,7 +283,7 @@ static int __init build_one_resource(struct device_node *parent,
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
- if (of_find_property(pp, "ranges", NULL) != NULL)
+ if (of_property_present(pp, "ranges"))
return 0;
/* If the parent is the dma node of an ISA bus, pass
@@ -293,17 +295,17 @@ static int __init use_1to1_mapping(struct device_node *pp)
* But, we should still pass the translation work up
* to the SBUS itself.
*/
- if (!strcmp(pp->name, "dma") ||
- !strcmp(pp->name, "espdma") ||
- !strcmp(pp->name, "ledma") ||
- !strcmp(pp->name, "lebuffer"))
+ if (of_node_name_eq(pp, "dma") ||
+ of_node_name_eq(pp, "espdma") ||
+ of_node_name_eq(pp, "ledma") ||
+ of_node_name_eq(pp, "lebuffer"))
return 0;
/* Similarly for all PCI bridges, if we get this far
* it lacks a ranges property, and this will include
* cases like Simba.
*/
- if (!strcmp(pp->name, "pci"))
+ if (of_node_name_eq(pp, "pci"))
return 0;
return 1;
@@ -339,9 +341,9 @@ static void __init build_device_resources(struct platform_device *op,
/* Prevent overrunning the op->resources[] array. */
if (num_reg > PROMREG_MAX) {
- printk(KERN_WARNING "%s: Too many regs (%d), "
+ printk(KERN_WARNING "%pOF: Too many regs (%d), "
"limiting to %d.\n",
- op->dev.of_node->full_name, num_reg, PROMREG_MAX);
+ op->dev.of_node, num_reg, PROMREG_MAX);
num_reg = PROMREG_MAX;
}
@@ -399,8 +401,8 @@ static void __init build_device_resources(struct platform_device *op,
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
- printk("%s reg[%d] -> %llx\n",
- op->dev.of_node->full_name, index,
+ printk("%pOF reg[%d] -> %llx\n",
+ op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
@@ -411,7 +413,7 @@ static void __init build_device_resources(struct platform_device *op,
r->end = result + size - 1;
r->flags = flags;
}
- r->name = op->dev.of_node->name;
+ r->name = op->dev.of_node->full_name;
}
}
@@ -546,8 +548,8 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
dp->irq_trans->data);
if (of_irq_verbose)
- printk("%s: direct translate %x --> %x\n",
- dp->full_name, orig_irq, irq);
+ printk("%pOF: direct translate %x --> %x\n",
+ dp, orig_irq, irq);
goto out;
}
@@ -558,7 +560,7 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
*
* If we hit a bus type or situation we cannot handle, we
* stop and assume that the original IRQ number was in a
- * format which has special meaning to it's immediate parent.
+ * format which has special meaning to its immediate parent.
*/
pp = dp->parent;
ip = NULL;
@@ -577,10 +579,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
&irq);
if (of_irq_verbose)
- printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
- op->dev.of_node->full_name,
- pp->full_name, this_orig_irq,
- of_node_full_name(iret), irq);
+ printk("%pOF: Apply [%pOF:%x] imap --> [%pOF:%x]\n",
+ op->dev.of_node,
+ pp, this_orig_irq, iret, irq);
if (!iret)
break;
@@ -590,15 +591,15 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
break;
}
} else {
- if (!strcmp(pp->name, "pci")) {
+ if (of_node_name_eq(pp, "pci")) {
unsigned int this_orig_irq = irq;
irq = pci_irq_swizzle(dp, pp, irq);
if (of_irq_verbose)
- printk("%s: PCI swizzle [%s] "
+ printk("%pOF: PCI swizzle [%pOF] "
"%x --> %x\n",
- op->dev.of_node->full_name,
- pp->full_name, this_orig_irq,
+ op->dev.of_node,
+ pp, this_orig_irq,
irq);
}
@@ -617,16 +618,13 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
irq = ip->irq_trans->irq_build(op->dev.of_node, irq,
ip->irq_trans->data);
if (of_irq_verbose)
- printk("%s: Apply IRQ trans [%s] %x --> %x\n",
- op->dev.of_node->full_name, ip->full_name, orig_irq, irq);
+ printk("%pOF: Apply IRQ trans [%pOF] %x --> %x\n",
+ op->dev.of_node, ip, orig_irq, irq);
out:
nid = of_node_to_nid(dp);
if (nid != -1) {
- cpumask_t numa_mask;
-
- cpumask_copy(&numa_mask, cpumask_of_node(nid));
- irq_set_affinity(irq, &numa_mask);
+ irq_set_affinity(irq, cpumask_of_node(nid));
}
return irq;
@@ -654,9 +652,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
/* Prevent overrunning the op->irqs[] array. */
if (op->archdata.num_irqs > PROMINTR_MAX) {
- printk(KERN_WARNING "%s: Too many irqs (%d), "
+ printk(KERN_WARNING "%pOF: Too many irqs (%d), "
"limiting to %d.\n",
- dp->full_name, op->archdata.num_irqs, PROMINTR_MAX);
+ dp, op->archdata.num_irqs, PROMINTR_MAX);
op->archdata.num_irqs = PROMINTR_MAX;
}
memcpy(op->archdata.irqs, irq, op->archdata.num_irqs * 4);
@@ -674,10 +672,12 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->phandle);
+ op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
- printk("%s: Could not register of device.\n",
- dp->full_name);
+ printk("%pOF: Could not register of device.\n", dp);
+ put_device(&op->dev);
kfree(op);
op = NULL;
}
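The full_name/%s to %pOF conversions throughout this file rely on the printk device-tree pointer extension, which prints the node's full path directly from the struct device_node pointer. A tiny standalone illustration; the function and message are invented.

/* %pOF expands to the full path of the device_node, replacing the old
 * pattern of printing dp->full_name.
 */
static void example_report_irq(struct device_node *dp, unsigned int irq)
{
	pr_info("%pOF: mapped to virq %u\n", dp, irq);
}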
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index de199bf0cb05..ba2a6ae23508 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -1,13 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/errno.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include "of_device_common.h"
@@ -65,6 +67,7 @@ void of_propagate_archdata(struct platform_device *bus)
op->dev.archdata.stc = bus_sd->stc;
op->dev.archdata.host_controller = bus_sd->host_controller;
op->dev.archdata.numa_node = bus_sd->numa_node;
+ op->dev.dma_ops = bus->dev.dma_ops;
if (dp->child)
of_propagate_archdata(op);
@@ -150,8 +153,8 @@ int of_bus_sbus_match(struct device_node *np)
struct device_node *dp = np;
while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
+ if (of_node_name_eq(dp, "sbus") ||
+ of_node_name_eq(dp, "sbi"))
return 1;
/* Have a look at use_1to1_mapping(). We're trying
@@ -159,7 +162,7 @@ int of_bus_sbus_match(struct device_node *np)
* don't have some intervening real bus that provides
* ranges based translations.
*/
- if (of_find_property(dp, "ranges", NULL) != NULL)
+ if (of_property_present(dp, "ranges"))
break;
dp = dp->parent;
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
index cdfd23992841..3d66230c61b6 100644
--- a/arch/sparc/kernel/of_device_common.h
+++ b/arch/sparc/kernel/of_device_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _OF_DEVICE_COMMON_H
#define _OF_DEVICE_COMMON_H
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index bc4d3f5d2e5d..a9448088e762 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
@@ -19,15 +20,17 @@
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/pgtable.h>
+#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>
#include "pci_impl.h"
+#include "kernel.h"
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;
@@ -184,8 +187,10 @@ static unsigned long pci_parse_of_flags(u32 addr0)
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
- flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+ if (addr0 & 0x01000000)
+ flags |= IORESOURCE_MEM_64
+ | PCI_BASE_ADDRESS_MEM_TYPE_64;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
| PCI_BASE_ADDRESS_MEM_PREFETCH;
@@ -210,8 +215,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
if (!addrs)
return;
if (ofpci_verbose)
- printk(" parse addresses (%d bytes) @ %p\n",
- proplen, addrs);
+ pci_info(dev, " parse addresses (%d bytes) @ %p\n",
+ proplen, addrs);
op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
@@ -223,63 +228,67 @@ static void pci_parse_of_addrs(struct platform_device *op,
continue;
i = addrs[0] & 0xff;
if (ofpci_verbose)
- printk(" start: %llx, end: %llx, i: %x\n",
- op_res->start, op_res->end, i);
+ pci_info(dev, " start: %llx, end: %llx, i: %x\n",
+ op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
- flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
- | IORESOURCE_SIZEALIGN;
+ flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
} else {
- printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+ pci_err(dev, "bad cfg reg num 0x%x\n", i);
continue;
}
res->start = op_res->start;
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
+
+ pci_info(dev, "reg 0x%x: %pR\n", i, res);
}
}
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+ void *stc, void *host_controller,
+ struct platform_device *op,
+ int numa_node)
+{
+ sd->iommu = iommu;
+ sd->stc = stc;
+ sd->host_controller = host_controller;
+ sd->op = op;
+ sd->numa_node = numa_node;
+}
+
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct dev_archdata *sd;
- struct pci_slot *slot;
struct platform_device *op;
struct pci_dev *dev;
- const char *type;
u32 class;
dev = pci_alloc_dev(bus);
if (!dev)
return NULL;
+ op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
- sd->iommu = pbm->iommu;
- sd->stc = &pbm->stc;
- sd->host_controller = pbm;
- sd->op = op = of_find_device_by_node(node);
- sd->numa_node = pbm->numa_node;
-
+ pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+ pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
sd->numa_node = pbm->numa_node;
- if (!strcmp(node->name, "ebus"))
+ if (of_node_name_eq(node, "ebus"))
of_propagate_archdata(op);
- type = of_get_property(node, "device_type", NULL);
- if (type == NULL)
- type = "";
-
if (ofpci_verbose)
- printk(" create device, devfn: %x, type: %s\n",
- devfn, type);
+		pci_info(bus, " create device, devfn: %x, type: %s\n",
+ devfn, of_node_get_device_type(node));
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -289,10 +298,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
- list_for_each_entry(slot, &dev->bus->slots, list)
- if (PCI_SLOT(dev->devfn) == slot->number)
- dev->slot = slot;
-
+ pci_dev_assign_slot(dev);
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
dev->device = of_getintprop_default(node, "device-id", 0xffff);
dev->subsystem_vendor =
@@ -305,7 +311,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
/* We can't actually use the firmware value, we have
* to read what is in the register right now. One
* reason is that in the case of IDE interfaces the
- * firmware can sample the value before the the IDE
+ * firmware can sample the value before the IDE
* interface is programmed into native mode.
*/
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
@@ -315,10 +321,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
- if (ofpci_verbose)
- printk(" class: 0x%x device name: %s\n",
- dev->class, pci_name(dev));
-
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
* disabled.
@@ -330,11 +332,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
- if (!strcmp(node->name, "pci")) {
+ if (of_node_name_eq(node, "pci")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
- } else if (!strcmp(type, "cardbus")) {
+ } else if (of_node_is_type(node, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
@@ -345,10 +347,13 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->irq = PCI_IRQ_NONE;
}
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class);
+
pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
- printk(" adding to system ...\n");
+ pci_info(dev, " adding to system ...\n");
pci_device_add(dev, bus);
@@ -392,15 +397,15 @@ static void apb_fake_ranges(struct pci_dev *dev,
res->flags = IORESOURCE_IO;
region.start = (first << 21);
region.end = (last << 21) + ((1 << 21) - 1);
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
apb_calc_first_last(map, &first, &last);
res = bus->resource[1];
res->flags = IORESOURCE_MEM;
- region.start = (first << 21);
- region.end = (last << 21) + ((1 << 21) - 1);
- pcibios_bus_to_resource(dev, res, &region);
+ region.start = (first << 29);
+ region.end = (last << 29) + ((1 << 29) - 1);
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
@@ -422,15 +427,20 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 size;
if (ofpci_verbose)
- printk("of_scan_pci_bridge(%s)\n", node->full_name);
+ pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
- node->full_name);
+ pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
+ node);
return;
}
+
+ if (ofpci_verbose)
+ pci_info(dev, " Bridge bus range [%u --> %u]\n",
+ busrange[0], busrange[1]);
+
ranges = of_get_property(node, "ranges", &len);
simba = 0;
if (ranges == NULL) {
@@ -441,8 +451,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
- printk(KERN_ERR "Failed to create pci bus for %s\n",
- node->full_name);
+ pci_err(dev, "Failed to create pci bus for %pOF\n",
+ node);
return;
}
@@ -450,6 +460,10 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
bus->bridge_ctl = 0;
+ if (ofpci_verbose)
+ pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
+ ranges, simba);
+
/* parse ranges property, or cook one up by hand for Simba */
/* PCI #address-cells == 3 and #size-cells == 2 always */
res = &dev->resource[PCI_BRIDGE_RESOURCES];
@@ -467,21 +481,40 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
}
i = 1;
for (; len >= 32; len -= 32, ranges += 8) {
+ u64 start;
+
+ if (ofpci_verbose)
+ pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
+ "%08x:%08x]\n",
+ ranges[0], ranges[1], ranges[2], ranges[3],
+ ranges[4], ranges[5], ranges[6], ranges[7]);
+
flags = pci_parse_of_flags(ranges[0]);
size = GET_64BIT(ranges, 6);
if (flags == 0 || size == 0)
continue;
+
+ /* On PCI-Express systems, PCI bridges that have no devices downstream
+ * have a bogus size value where the first 32-bit cell is 0xffffffff.
+ * This results in a bogus range where start + size overflows.
+ *
+ * Just skip these otherwise the kernel will complain when the resource
+ * tries to be claimed.
+ */
+ if (size >> 32 == 0xffffffff)
+ continue;
+
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
- printk(KERN_ERR "PCI: ignoring extra I/O range"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "ignoring extra I/O range"
+ " for bridge %pOF\n", node);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
- printk(KERN_ERR "PCI: too many memory ranges"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "too many memory ranges"
+ " for bridge %pOF\n", node);
continue;
}
res = bus->resource[i];
@@ -489,15 +522,20 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
}
res->flags = flags;
- region.start = GET_64BIT(ranges, 1);
+ region.start = start = GET_64BIT(ranges, 1);
region.end = region.start + size - 1;
- pcibios_bus_to_resource(dev, res, &region);
+
+ if (ofpci_verbose)
+ pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
+ flags, start, size);
+
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
after_ranges:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
if (ofpci_verbose)
- printk(" bus name: %s\n", bus->name);
+ pci_info(dev, " bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus);
}
@@ -512,14 +550,13 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct pci_dev *dev;
if (ofpci_verbose)
- printk("PCI: scan_bus[%s] bus no %d\n",
- node->full_name, bus->number);
+ pci_info(bus, "scan_bus[%pOF] bus no %d\n",
+ node, bus->number);
- child = NULL;
prev_devfn = -1;
- while ((child = of_get_next_child(node, child)) != NULL) {
+ for_each_child_of_node(node, child) {
if (ofpci_verbose)
- printk(" * %s\n", child->full_name);
+ pci_info(bus, " * %pOF\n", child);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -540,11 +577,9 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
if (!dev)
continue;
if (ofpci_verbose)
- printk("PCI: dev header type: %x\n",
- dev->hdr_type);
+ pci_info(dev, "dev header type: %x\n", dev->hdr_type);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
of_scan_pci_bridge(pbm, child, dev);
}
}
@@ -558,7 +593,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char *
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
- return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
@@ -584,6 +619,72 @@ static void pci_bus_register_of_sysfs(struct pci_bus *bus)
pci_bus_register_of_sysfs(child_bus);
}
+static void pci_claim_legacy_resources(struct pci_dev *dev)
+{
+ struct pci_bus_region region;
+ struct resource *p, *root, *conflict;
+
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ p->name = "Video RAM area";
+ p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ region.start = 0xa0000UL;
+ region.end = region.start + 0x1ffffUL;
+ pcibios_bus_to_resource(dev->bus, p, &region);
+
+ root = pci_find_parent_resource(dev, p);
+ if (!root) {
+ pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
+ goto err;
+ }
+
+ conflict = request_resource_conflict(root, p);
+ if (conflict) {
+ pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
+ p, conflict->name, conflict);
+ goto err;
+ }
+
+ pci_info(dev, "VGA legacy framebuffer %pR\n", p);
+ return;
+
+err:
+ kfree(p);
+}
+
+static void pci_claim_bus_resources(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct resource *r;
+ int i;
+
+ pci_dev_for_each_resource(dev, r, i) {
+ if (r->parent || !r->start || !r->flags)
+ continue;
+
+ if (ofpci_verbose)
+ pci_info(dev, "Claiming Resource %d: %pR\n",
+ i, r);
+
+ pci_claim_resource(dev, i);
+ }
+
+ pci_claim_legacy_resources(dev);
+ }
+
+ list_for_each_entry(child_bus, &bus->children, node)
+ pci_claim_bus_resources(child_bus);
+}
+
struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device *parent)
{
@@ -591,12 +692,15 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device_node *node = pbm->op->dev.of_node;
struct pci_bus *bus;
- printk("PCI: Scanning PBM %s\n", node->full_name);
+ printk("PCI: Scanning PBM %pOF\n", node);
pci_add_resource_offset(&resources, &pbm->io_space,
- pbm->io_space.start);
+ pbm->io_offset);
pci_add_resource_offset(&resources, &pbm->mem_space,
- pbm->mem_space.start);
+ pbm->mem_offset);
+ if (pbm->mem64_space.flags)
+ pci_add_resource_offset(&resources, &pbm->mem64_space,
+ pbm->mem64_offset);
pbm->busn.start = pbm->pci_first_busno;
pbm->busn.end = pbm->pci_last_busno;
pbm->busn.flags = IORESOURCE_BUS;
@@ -604,210 +708,30 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
pbm, &resources);
if (!bus) {
- printk(KERN_ERR "Failed to create bus for %s\n",
- node->full_name);
+ printk(KERN_ERR "Failed to create bus for %pOF\n", node);
pci_free_resource_list(&resources);
return NULL;
}
pci_of_scan_bus(pbm, node, bus);
- pci_bus_add_devices(bus);
pci_bus_register_of_sysfs(bus);
- return bus;
-}
-
-void pcibios_fixup_bus(struct pci_bus *pbus)
-{
-}
-
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- return res->start;
-}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- u16 cmd, oldcmd;
- int i;
+ pci_claim_bus_resources(bus);
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- oldcmd = cmd;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *res = &dev->resource[i];
-
- /* Only set up the requested stuff */
- if (!(mask & (1<<i)))
- continue;
-
- if (res->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (cmd != oldcmd) {
- printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
- pci_name(dev), cmd);
- /* Enable the appropriate bits in the PCI command register. */
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
+ pci_bus_add_devices(bus);
+ return bus;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
-
-/* If the user uses a host-bridge as the PCI device, he may use
- * this to perform a raw mmap() of the I/O or MEM space behind
- * that controller.
- *
- * This can be useful for execution of x86 PCI bios initialization code
- * on a PCI card, like the xfree86 int10 stuff does.
- */
-static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long space_size, user_offset, user_size;
-
- if (mmap_state == pci_mmap_io) {
- space_size = resource_size(&pbm->io_space);
- } else {
- space_size = resource_size(&pbm->mem_space);
- }
-
- /* Make sure the request is in range. */
- user_offset = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
- if (user_offset >= space_size ||
- (user_offset + user_size) > space_size)
+ if (!pbm)
return -EINVAL;
- if (mmap_state == pci_mmap_io) {
- vma->vm_pgoff = (pbm->io_space.start +
- user_offset) >> PAGE_SHIFT;
- } else {
- vma->vm_pgoff = (pbm->mem_space.start +
- user_offset) >> PAGE_SHIFT;
- }
-
- return 0;
-}
-
-/* Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap. They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static int __pci_mmap_make_offset(struct pci_dev *pdev,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- unsigned long user_paddr, user_size;
- int i, err;
-
- /* First compute the physical address in vma->vm_pgoff,
- * making sure the user offset is within range in the
- * appropriate PCI space.
- */
- err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
- if (err)
- return err;
-
- /* If this is a mapping on a host bridge, any address
- * is OK.
- */
- if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return err;
-
- /* Otherwise make sure it's in the range for one of the
- * device's resources.
- */
- user_paddr = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
-
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &pdev->resource[i];
- resource_size_t aligned_end;
-
- /* Active? */
- if (!rp->flags)
- continue;
-
- /* Same type? */
- if (i == PCI_ROM_RESOURCE) {
- if (mmap_state != pci_mmap_mem)
- continue;
- } else {
- if ((mmap_state == pci_mmap_io &&
- (rp->flags & IORESOURCE_IO) == 0) ||
- (mmap_state == pci_mmap_mem &&
- (rp->flags & IORESOURCE_MEM) == 0))
- continue;
- }
-
- /* Align the resource end to the next page address.
- * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
- * because actually we need the address of the next byte
- * after rp->end.
- */
- aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
-
- if ((rp->start <= user_paddr) &&
- (user_paddr + user_size) <= aligned_end)
- break;
- }
-
- if (i > PCI_ROM_RESOURCE)
- return -EINVAL;
-
- return 0;
-}
-
-/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- /* Our io_remap_pfn_range takes care of this, do nothing. */
-}
-
-/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
- * for this architecture. The region in the process to map is described by vm_start
- * and vm_end members of VMA, the base physical address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine)
-{
- int ret;
-
- ret = __pci_mmap_make_offset(dev, vma, mmap_state);
- if (ret < 0)
- return ret;
-
- __pci_mmap_set_pgprot(dev, vma, mmap_state);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (ret)
- return ret;
+ vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;
return 0;
}
@@ -854,7 +778,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
void arch_teardown_msi_irq(unsigned int irq)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
- struct pci_dev *pdev = entry->dev;
+ struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
@@ -862,67 +786,53 @@ void arch_teardown_msi_irq(unsigned int irq)
}
#endif /* !(CONFIG_PCI_MSI) */
-static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+/* ALI sound chips generate 31 bits of DMA; a special register
+ * determines what bit 31 is emitted as.
+ */
+int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
+ struct iommu *iommu = dev->archdata.iommu;
struct pci_dev *ali_isa_bridge;
u8 val;
- /* ALI sound chips generate 31-bits of DMA, a special register
- * determines what bit 31 is emitted as.
- */
+ if (!dev_is_pci(dev))
+ return 0;
+
+ if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
+ to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
+ device_mask != 0x7fffffff)
+ return 0;
+
ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
PCI_DEVICE_ID_AL_M1533,
NULL);
pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
- if (set_bit)
+ if (iommu->dma_addr_mask & 0x80000000)
val |= 0x01;
else
val &= ~0x01;
pci_write_config_byte(ali_isa_bridge, 0x7e, val);
pci_dev_put(ali_isa_bridge);
-}
-
-int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
-{
- u64 dma_addr_mask;
-
- if (pdev == NULL) {
- dma_addr_mask = 0xffffffff;
- } else {
- struct iommu *iommu = pdev->dev.archdata.iommu;
-
- dma_addr_mask = iommu->dma_addr_mask;
-
- if (pdev->vendor == PCI_VENDOR_ID_AL &&
- pdev->device == PCI_DEVICE_ID_AL_M5451 &&
- device_mask == 0x7fffffff) {
- ali_sound_dma_hack(pdev,
- (dma_addr_mask & 0x80000000) != 0);
- return 1;
- }
- }
-
- if (device_mask >= (1UL << 32UL))
- return 0;
-
- return (device_mask & dma_addr_mask) == dma_addr_mask;
+ return 1;
}
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
const struct resource *rp, resource_size_t *start,
resource_size_t *end)
{
- struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long offset;
-
- if (rp->flags & IORESOURCE_IO)
- offset = pbm->io_space.start;
- else
- offset = pbm->mem_space.start;
+ struct pci_bus_region region;
- *start = rp->start - offset;
- *end = rp->end - offset;
+ /*
+ * "User" addresses are shown in /sys/devices/pci.../.../resource
+ * and /proc/bus/pci/devices and used as mmap offsets for
+ * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
+ *
+ * On sparc, these are PCI bus addresses, i.e., raw BAR values.
+ */
+ pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
+ *start = region.start;
+ *end = region.end;
}
void pcibios_set_master(struct pci_dev *dev)
@@ -930,6 +840,27 @@ void pcibios_set_master(struct pci_dev *dev)
/* No special bus mastering setup handling */
}
+#ifdef CONFIG_PCI_IOV
+int pcibios_device_add(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+
+ /* Add sriov arch specific initialization here.
+ * Copy dev_archdata from PF to VF
+ */
+ if (dev->is_virtfn) {
+ struct dev_archdata *psd;
+
+ pdev = dev->physfn;
+ psd = &pdev->dev.archdata;
+ pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+ psd->stc, psd->host_controller, NULL,
+ psd->numa_node);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int __init pcibios_init(void)
{
pci_dfl_cache_line_size = 64 >> 2;
@@ -938,11 +869,43 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
#ifdef CONFIG_SYSFS
+
+#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */
+
+static void pcie_bus_slot_names(struct pci_bus *pbus)
+{
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+
+ list_for_each_entry(pdev, &pbus->devices, bus_list) {
+ char name[SLOT_NAME_SIZE];
+ struct pci_slot *pci_slot;
+ const u32 *slot_num;
+ int len;
+
+ slot_num = of_get_property(pdev->dev.of_node,
+ "physical-slot#", &len);
+
+ if (slot_num == NULL || len != 4)
+ continue;
+
+ snprintf(name, sizeof(name), "%u", slot_num[0]);
+ pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
+
+ if (IS_ERR(pci_slot))
+ pr_err("PCI: pci_create_slot returned %ld.\n",
+ PTR_ERR(pci_slot));
+ }
+
+ list_for_each_entry(bus, &pbus->children, node)
+ pcie_bus_slot_names(bus);
+}
+
static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
const struct pci_slot_names {
u32 slot_mask;
- char names[0];
+ char names[];
} *prop;
const char *sp;
int len, i;
@@ -956,8 +919,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
sp = prop->names;
if (ofpci_verbose)
- printk("PCI: Making slots for [%s] mask[0x%02x]\n",
- node->full_name, mask);
+ pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
+ node, mask);
i = 0;
while (mask) {
@@ -970,12 +933,12 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
}
if (ofpci_verbose)
- printk("PCI: Making slot [%s]\n", sp);
+ pci_info(bus, "Making slot [%s]\n", sp);
pci_slot = pci_create_slot(bus, i, sp, NULL);
if (IS_ERR(pci_slot))
- printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
- PTR_ERR(pci_slot));
+ pci_err(bus, "pci_create_slot returned %ld\n",
+ PTR_ERR(pci_slot));
sp += strlen(sp) + 1;
mask &= ~this_bit;
@@ -989,22 +952,32 @@ static int __init of_pci_slot_init(void)
while ((pbus = pci_find_next_bus(pbus)) != NULL) {
struct device_node *node;
+ struct pci_dev *pdev;
+
+ pdev = list_first_entry(&pbus->devices, struct pci_dev,
+ bus_list);
- if (pbus->self) {
- /* PCI->PCI bridge */
- node = pbus->self->dev.of_node;
+ if (pdev && pci_is_pcie(pdev)) {
+ pcie_bus_slot_names(pbus);
} else {
- struct pci_pbm_info *pbm = pbus->sysdata;
- /* Host PCI controller */
- node = pbm->op->dev.of_node;
- }
+ if (pbus->self) {
- pci_bus_slot_names(node, pbus);
+ /* PCI->PCI bridge */
+ node = pbus->self->dev.of_node;
+
+ } else {
+ struct pci_pbm_info *pbm = pbus->sysdata;
+
+ /* Host PCI controller */
+ node = pbm->op->dev.of_node;
+ }
+
+ pci_bus_slot_names(node, pbus);
+ }
}
return 0;
}
-
-module_init(of_pci_slot_init);
+device_initcall(of_pci_slot_init);
#endif
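To make the pci_resource_to_user() change above concrete, here is a worked example of the translation it now performs; all numbers are invented for illustration.

/* Illustrative only:
 *
 *   pbm->mem_space.start = 0x000007f000000000   (CPU physical window)
 *   pbm->mem_offset      = 0x000007f000000000   (CPU-to-bus offset)
 *   BAR resource->start  = 0x000007f000100000   (CPU view in struct resource)
 *
 * pcibios_resource_to_bus() subtracts the offset registered via
 * pci_add_resource_offset(), so userspace now sees
 *
 *   start = 0x0000000000100000   (raw BAR value, PCI bus view)
 *
 * which is what proc_bus_pci_mmap() and the sysfs "resource" files
 * expect on sparc.
 */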
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index a6895987fb70..2576f4f31309 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
@@ -5,10 +6,10 @@
#include <linux/string.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/device.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/prom.h>
#include <asm/oplib.h>
@@ -329,43 +330,6 @@ void pci_get_pbm_props(struct pci_pbm_info *pbm)
}
}
-static void pci_register_legacy_regions(struct resource *io_res,
- struct resource *mem_res)
-{
- struct resource *p;
-
- /* VGA Video RAM. */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- p->name = "Video RAM area";
- p->start = mem_res->start + 0xa0000UL;
- p->end = p->start + 0x1ffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- p->name = "System ROM";
- p->start = mem_res->start + 0xf0000UL;
- p->end = p->start + 0xffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- p->name = "Video ROM";
- p->start = mem_res->start + 0xc0000UL;
- p->end = p->start + 0x7fffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
-}
-
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma",
@@ -397,6 +361,8 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
int i, saw_mem, saw_io;
int num_pbm_ranges;
+ /* Corresponds to generic devm_of_pci_get_host_bridge_resources() */
+
saw_mem = saw_io = 0;
pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i);
if (!pbm_ranges) {
@@ -407,16 +373,20 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
}
num_pbm_ranges = i / sizeof(*pbm_ranges);
+ memset(&pbm->mem64_space, 0, sizeof(struct resource));
for (i = 0; i < num_pbm_ranges; i++) {
const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
- unsigned long a, size;
+ unsigned long a, size, region_a;
u32 parent_phys_hi, parent_phys_lo;
+ u32 child_phys_mid, child_phys_lo;
u32 size_hi, size_lo;
int type;
parent_phys_hi = pr->parent_phys_hi;
parent_phys_lo = pr->parent_phys_lo;
+ child_phys_mid = pr->child_phys_mid;
+ child_phys_lo = pr->child_phys_lo;
if (tlb_type == hypervisor)
parent_phys_hi &= 0x0fffffff;
@@ -426,6 +396,8 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
type = (pr->child_phys_hi >> 24) & 0x3;
a = (((unsigned long)parent_phys_hi << 32UL) |
((unsigned long)parent_phys_lo << 0UL));
+ region_a = (((unsigned long)child_phys_mid << 32UL) |
+ ((unsigned long)child_phys_lo << 0UL));
size = (((unsigned long)size_hi << 32UL) |
((unsigned long)size_lo << 0UL));
@@ -440,6 +412,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
pbm->io_space.start = a;
pbm->io_space.end = a + size - 1UL;
pbm->io_space.flags = IORESOURCE_IO;
+ pbm->io_offset = a - region_a;
saw_io = 1;
break;
@@ -448,11 +421,18 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
pbm->mem_space.start = a;
pbm->mem_space.end = a + size - 1UL;
pbm->mem_space.flags = IORESOURCE_MEM;
+ pbm->mem_offset = a - region_a;
saw_mem = 1;
break;
case 3:
- /* XXX 64-bit MEM handling XXX */
+ /* 64-bit MEM handling */
+ pbm->mem64_space.start = a;
+ pbm->mem64_space.end = a + size - 1UL;
+ pbm->mem64_space.flags = IORESOURCE_MEM;
+ pbm->mem64_offset = a - region_a;
+ saw_mem = 1;
+ break;
default:
break;
@@ -466,18 +446,31 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
prom_halt();
}
- printk("%s: PCI IO[%llx] MEM[%llx]\n",
- pbm->name,
- pbm->io_space.start,
- pbm->mem_space.start);
+ if (pbm->io_space.flags)
+ printk("%s: PCI IO %pR offset %llx\n",
+ pbm->name, &pbm->io_space, pbm->io_offset);
+ if (pbm->mem_space.flags)
+ printk("%s: PCI MEM %pR offset %llx\n",
+ pbm->name, &pbm->mem_space, pbm->mem_offset);
+ if (pbm->mem64_space.flags && pbm->mem_space.flags) {
+ if (pbm->mem64_space.start <= pbm->mem_space.end)
+ pbm->mem64_space.start = pbm->mem_space.end + 1;
+ if (pbm->mem64_space.start > pbm->mem64_space.end)
+ pbm->mem64_space.flags = 0;
+ }
+
+ if (pbm->mem64_space.flags)
+ printk("%s: PCI MEM64 %pR offset %llx\n",
+ pbm->name, &pbm->mem64_space, pbm->mem64_offset);
pbm->io_space.name = pbm->mem_space.name = pbm->name;
+ pbm->mem64_space.name = pbm->name;
request_resource(&ioport_resource, &pbm->io_space);
request_resource(&iomem_resource, &pbm->mem_space);
+ if (pbm->mem64_space.flags)
+ request_resource(&iomem_resource, &pbm->mem64_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
pci_register_iommu_region(pbm);
}
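Each "ranges" entry decoded above carries the space type in bits 24-25 of child_phys_hi, the CPU-side base in the parent address cells, and the PCI-side base in the child address cells; the new io/mem/mem64 offsets are simply CPU base minus PCI base. A small stand-alone sketch of that decoding with made-up numbers (the sun4v masking of parent_phys_hi is omitted for brevity):

#include <stdio.h>
#include <stdint.h>

/* Field layout mirrors how the code above consumes linux_prom_pci_ranges. */
struct prom_pci_range {
	uint32_t child_phys_hi;		/* bits 24-25 = space type */
	uint32_t child_phys_mid;
	uint32_t child_phys_lo;
	uint32_t parent_phys_hi;
	uint32_t parent_phys_lo;
	uint32_t size_hi;
	uint32_t size_lo;
};

int main(void)
{
	/* Made-up 32-bit MEM range (type 2): PCI base 0x0 at CPU 0x1ff00000000, 2 GB. */
	struct prom_pci_range pr = {
		.child_phys_hi  = 0x02000000,
		.parent_phys_hi = 0x000001ff,
		.size_lo        = 0x80000000,
	};
	int type = (pr.child_phys_hi >> 24) & 0x3;
	unsigned long long a        = ((unsigned long long)pr.parent_phys_hi << 32) | pr.parent_phys_lo;
	unsigned long long region_a = ((unsigned long long)pr.child_phys_mid << 32) | pr.child_phys_lo;
	unsigned long long size     = ((unsigned long long)pr.size_hi << 32) | pr.size_lo;

	printf("type %d: CPU [%#llx-%#llx], offset (CPU - PCI) = %#llx\n",
	       type, a, a + size - 1, a - region_a);
	return 0;
}

Type 1 feeds io_space, type 2 feeds mem_space, and type 3 now populates mem64_space instead of being skipped.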
@@ -497,8 +490,8 @@ void pci_scan_for_target_abort(struct pci_pbm_info *pbm,
PCI_STATUS_REC_TARGET_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Target Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Target Abort [%016x]\n",
+ pbm->name, status);
}
}
@@ -520,8 +513,8 @@ void pci_scan_for_master_abort(struct pci_pbm_info *pbm,
(status & (PCI_STATUS_REC_MASTER_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s received Master Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device received Master Abort "
+ "[%016x]\n", pbm->name, status);
}
}
@@ -544,8 +537,8 @@ void pci_scan_for_parity_error(struct pci_pbm_info *pbm,
PCI_STATUS_DETECTED_PARITY));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Parity Error [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Parity Error [%016x]\n",
+ pbm->name, status);
}
}
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index e60fc6a67e9b..0b91bde80fdc 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
@@ -9,7 +10,9 @@
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/numa.h>
#include <asm/prom.h>
#include <asm/irq.h>
@@ -415,7 +418,7 @@ static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct device_node *dp = op->dev.of_node;
int err;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
@@ -508,7 +511,6 @@ static const struct of_device_id fire_match[] = {
static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = fire_match,
},
.probe = fire_probe,
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index 5f688531f48c..83718876f1d4 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* pci_impl.h: Helper definitions for PCI controller support.
*
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
@@ -10,7 +11,6 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/msi.h>
-#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -19,9 +19,9 @@
* each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
* underneath. Each PCI bus module uses an IOMMU (shared by both
* PBMs of a controller, or per-PBM), and if a streaming buffer
- * is present, each PCI bus module has it's own. (ie. the IOMMU
+ * is present, each PCI bus module has its own. (ie. the IOMMU
* might be shared between PBMs, the STC is never shared)
- * Furthermore, each PCI bus module controls it's own autonomous
+ * Furthermore, each PCI bus module controls its own autonomous
* PCI bus.
*/
@@ -48,8 +48,8 @@ struct sparc64_msiq_ops {
unsigned long devino);
};
-extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
- const struct sparc64_msiq_ops *ops);
+void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+ const struct sparc64_msiq_ops *ops);
struct sparc64_msiq_cookie {
struct pci_pbm_info *pbm;
@@ -97,7 +97,12 @@ struct pci_pbm_info {
/* PBM I/O and Memory space resources. */
struct resource io_space;
struct resource mem_space;
+ struct resource mem64_space;
struct resource busn;
+ /* offset */
+ resource_size_t io_offset;
+ resource_size_t mem_offset;
+ resource_size_t mem64_offset;
/* Base of PCI Config space, can be per-PBM or shared. */
unsigned long config_space;
@@ -158,23 +163,23 @@ extern struct pci_pbm_info *pci_pbm_root;
extern int pci_num_pbms;
/* PCI bus scanning and fixup support. */
-extern void pci_get_pbm_props(struct pci_pbm_info *pbm);
-extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
- struct device *parent);
-extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
+void pci_get_pbm_props(struct pci_pbm_info *pbm);
+struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
+ struct device *parent);
+void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
/* Error reporting support. */
-extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
/* Configuration space access. */
-extern void pci_config_read8(u8 *addr, u8 *ret);
-extern void pci_config_read16(u16 *addr, u16 *ret);
-extern void pci_config_read32(u32 *addr, u32 *ret);
-extern void pci_config_write8(u8 *addr, u8 val);
-extern void pci_config_write16(u16 *addr, u16 val);
-extern void pci_config_write32(u32 *addr, u32 val);
+void pci_config_read8(u8 *addr, u8 *ret);
+void pci_config_read16(u16 *addr, u16 *ret);
+void pci_config_read32(u32 *addr, u32 *ret);
+void pci_config_write8(u8 *addr, u8 val);
+void pci_config_write16(u16 *addr, u16 val);
+void pci_config_write32(u32 *addr, u32 val);
extern struct pci_ops sun4u_pci_ops;
extern struct pci_ops sun4v_pci_ops;
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 580651af73f2..acb2f83a1d5c 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -1,9 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irq.h>
@@ -111,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
static struct irq_chip msi_irq = {
.name = "PCI-MSI",
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
- .irq_enable = unmask_msi_irq,
- .irq_disable = mask_msi_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
/* XXX affinity XXX */
};
@@ -145,13 +148,13 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p,
msiqid = pick_msiq(pbm);
err = ops->msi_setup(pbm, msiqid, msi,
- (entry->msi_attrib.is_64 ? 1 : 0));
+ (entry->pci.msi_attrib.is_64 ? 1 : 0));
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
@@ -161,7 +164,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p,
msg.data = msi;
irq_set_msi_desc(*irq_p, entry);
- write_msi_msg(*irq_p, &msg);
+ pci_write_msi_msg(*irq_p, &msg);
return 0;
@@ -190,8 +193,8 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
break;
}
if (i >= pbm->msi_num) {
- printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, irq);
+ pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
+ irq);
return;
}
@@ -200,9 +203,9 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
err = ops->msi_teardown(pbm, msi_num);
if (err) {
- printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
- "irq %u, gives error %d\n",
- pbm->name, msi_num, irq, err);
+ pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "

+ "irq %u, gives error %d\n", pbm->name, msi_num, irq,
+ err);
return;
}
@@ -284,10 +287,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
nid = pbm->numa_node;
if (nid != -1) {
- cpumask_t numa_mask;
-
- cpumask_copy(&numa_mask, cpumask_of_node(nid));
- irq_set_affinity(irq, &numa_mask);
+ irq_set_affinity(irq, cpumask_of_node(nid));
}
err = request_irq(irq, sparc64_msiq_interrupt, 0,
"MSIQ",
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index c647634ead2b..1efc98305ec7 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
@@ -12,7 +13,9 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -604,7 +607,6 @@ static const struct of_device_id psycho_match[] = {
static struct platform_driver psycho_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = psycho_match,
},
.probe = psycho_probe,
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 6f00d27e8dac..a84598568300 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
@@ -12,7 +13,10 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/apb.h>
#include <asm/iommu.h>
@@ -455,7 +459,6 @@ static void sabre_pbm_init(struct pci_pbm_info *pbm,
static const struct of_device_id sabre_match[];
static int sabre_probe(struct platform_device *op)
{
- const struct of_device_id *match;
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
@@ -465,8 +468,7 @@ static int sabre_probe(struct platform_device *op)
const u32 *vdma;
u64 clear_irq;
- match = of_match_device(sabre_match, &op->dev);
- hummingbird_p = match && (match->data != NULL);
+ hummingbird_p = (uintptr_t)device_get_match_data(&op->dev);
if (!hummingbird_p) {
struct device_node *cpu_dp;
@@ -474,7 +476,7 @@ static int sabre_probe(struct platform_device *op)
* different ways, inconsistently.
*/
for_each_node_by_type(cpu_dp, "cpu") {
- if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe"))
+ if (of_node_name_eq(cpu_dp, "SUNW,UltraSPARC-IIe"))
hummingbird_p = 1;
}
}
@@ -600,7 +602,6 @@ static const struct of_device_id sabre_match[] = {
static struct platform_driver sabre_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = sabre_match,
},
.probe = sabre_probe,
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 8f76f23dac38..93cd9e5a8099 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
*
* Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -10,7 +11,11 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/numa.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -140,7 +145,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
/* This is __REALLY__ dangerous. When we put the
* streaming buffer into diagnostic mode to probe
- * it's tags and error status, we _must_ clear all
+ * its tags and error status, we _must_ clear all
* of the line tag valid bits before re-enabling
* the streaming buffer. If any dirty data lives
* in the STC when we do this, we will end up
@@ -270,7 +275,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
pbm->name, type_string);
/* Put the IOMMU into diagnostic mode and probe
- * it's TLB for entries with error status.
+ * its TLB for entries with error status.
*
* It is very possible for another DVMA to occur
* while we do this probe, and corrupt the system
@@ -581,7 +586,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
unsigned long csr_reg, csr, csr_error_bits;
irqreturn_t ret = IRQ_NONE;
- u16 stat;
+ u32 stat;
csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
csr = upa_readq(csr_reg);
@@ -617,7 +622,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
pbm->name);
ret = IRQ_HANDLED;
}
- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+ pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +630,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+ pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
@@ -1268,7 +1273,7 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
pbm->chip_version >= 0x2)
tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
- if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL))
+ if (!of_property_read_bool(pbm->op->dev.of_node, "no-bus-parking"))
tmp |= SCHIZO_PCICTRL_PARK;
else
tmp &= ~SCHIZO_PCICTRL_PARK;
@@ -1346,7 +1351,7 @@ static int schizo_pbm_init(struct pci_pbm_info *pbm,
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
@@ -1457,15 +1462,13 @@ out_err:
return err;
}
-static const struct of_device_id schizo_match[];
static int schizo_probe(struct platform_device *op)
{
- const struct of_device_id *match;
+ unsigned long chip_type = (unsigned long)device_get_match_data(&op->dev);
- match = of_match_device(schizo_match, &op->dev);
- if (!match)
+ if (!chip_type)
return -EINVAL;
- return __schizo_init(op, (unsigned long)match->data);
+ return __schizo_init(op, chip_type);
}
/* The ordering of this table is very important. Some Tomatillo
@@ -1495,7 +1498,6 @@ static const struct of_device_id schizo_match[] = {
static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = schizo_match,
},
.probe = schizo_probe,
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d07f6b29aed8..791f0a76665f 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
*
* Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -14,7 +15,10 @@
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-map-ops.h>
+#include <asm/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -23,14 +27,29 @@
#include "pci_impl.h"
#include "iommu_common.h"
+#include "kernel.h"
#include "pci_sun4v.h"
#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "
-static unsigned long vpci_major = 1;
-static unsigned long vpci_minor = 1;
+static unsigned long vpci_major;
+static unsigned long vpci_minor;
+
+struct vpci_version {
+ unsigned long major;
+ unsigned long minor;
+};
+
+/* Ordered from highest major version to lowest */
+static struct vpci_version vpci_versions[] = {
+ { .major = 2, .minor = 0 },
+ { .major = 1, .minor = 1 },
+};
+
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
@@ -48,7 +67,7 @@ static int iommu_batch_initialized;
/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p->dev = dev;
p->prot = prot;
@@ -56,31 +75,63 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0;
}
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+ return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
/* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+ u64 *pglist = p->pglist;
+ u64 index_count;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
- u64 *pglist = p->pglist;
unsigned long npages = p->npages;
+ unsigned long iotsb_num;
+ unsigned long ret;
+ long num;
+
+ /* VPCI maj=1, min=[0,1] only supports read and write */
+ if (vpci_major < 2)
+ prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
- long num;
-
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist));
- if (unlikely(num < 0)) {
- if (printk_ratelimit())
- printk("iommu_batch_flush: IOMMU map of "
- "[%08lx:%08llx:%lx:%lx:%lx] failed with "
- "status %ld\n",
- devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist), num);
- return -1;
+ if (!iommu_use_atu(pbm->iommu, mask)) {
+ num = pci_sun4v_iommu_map(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages,
+ prot,
+ __pa(pglist));
+ if (unlikely(num < 0)) {
+ pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist),
+ num);
+ return -1;
+ }
+ } else {
+ index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+ iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+ ret = pci_sun4v_iotsb_map(devhandle,
+ iotsb_num,
+ index_count,
+ prot,
+ __pa(pglist),
+ &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle, iotsb_num,
+ index_count, prot,
+ __pa(pglist), ret);
+ return -1;
+ }
}
-
entry += num;
npages -= num;
pglist += num;
@@ -92,47 +143,50 @@ static long iommu_batch_flush(struct iommu_batch *p)
return 0;
}
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
- iommu_batch_flush(p);
+ iommu_batch_flush(p, mask);
p->entry = entry;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
return 0;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
}
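The batch helpers above queue up to PGLIST_NENTS = PAGE_SIZE / sizeof(u64) physical page addresses in a per-CPU list and hand them to the hypervisor in one map call, flushing whenever the list fills or the mapping moves to a new entry. A back-of-the-envelope sketch of the call cost, assuming sparc64's usual 8 KB page and IO page size (an assumption, not something this patch states):

#include <stdio.h>

int main(void)
{
	/* sparc64 kernel page size and IO page size assumed to be 8 KB. */
	unsigned long page_size = 8192, io_page_size = 8192;
	unsigned long pglist_nents = page_size / sizeof(unsigned long long);	/* 1024 */
	unsigned long buf = 16UL << 20;			/* a 16 MB mapping */
	unsigned long npages = buf / io_page_size;	/* 2048 IO pages */

	printf("PGLIST_NENTS = %lu, IO pages = %lu, hypervisor flushes = %lu\n",
	       pglist_nents, npages, (npages + pglist_nents - 1) / pglist_nents);
	return 0;
}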
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
+ u64 mask;
unsigned long flags, order, first_page, npages, n;
+ unsigned long prot = 0;
struct iommu *iommu;
+ struct iommu_map_table *tbl;
struct page *page;
void *ret;
long entry;
@@ -140,11 +194,14 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
size = IO_PAGE_ALIGN(size);
order = get_order(size);
- if (unlikely(order >= MAX_ORDER))
+ if (unlikely(order > MAX_PAGE_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
@@ -154,33 +211,36 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
+ mask = dev->coherent_dma_mask;
+ if (!iommu_use_atu(iommu, mask))
+ tbl = &iommu->tbl;
+ else
+ tbl = &iommu->atu->tbl;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
+ (unsigned long)(-1), 0);
- if (unlikely(entry == DMA_ERROR_CODE))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
local_irq_save(flags);
iommu_batch_start(dev,
- (HV_PCI_MAP_ATTR_READ |
+ (HV_PCI_MAP_ATTR_READ | prot |
HV_PCI_MAP_ATTR_WRITE),
entry);
for (n = 0; n < npages; n++) {
- long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -188,96 +248,174 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, *dma_addrp, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
+ iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
+static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ struct pci_bus *bus_dev)
+{
+ struct pci_dev *pdev;
+ unsigned long err;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int fun;
+
+ list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+ if (pdev->subordinate) {
+ /* No need to bind pci bridge */
+ dma_4v_iotsb_bind(devhandle, iotsb_num,
+ pdev->subordinate);
+ } else {
+ bus = bus_dev->number;
+ device = PCI_SLOT(pdev->devfn);
+ fun = PCI_FUNC(pdev->devfn);
+ err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+ HV_PCI_DEVICE_BUILD(bus,
+ device,
+ fun));
+
+ /* If bind fails for one device it is going to fail
+ * for the rest of the devices as well, because we are
+ * sharing the IOTSB. So in case of failure simply
+ * return the error.
+ */
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+ dma_addr_t dvma, unsigned long iotsb_num,
+ unsigned long entry, unsigned long npages)
+{
+ unsigned long num, flags;
+ unsigned long ret;
+
+ local_irq_save(flags);
+ do {
+ if (dvma <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+ } else {
+ ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+ entry, npages, &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+ ret);
+ }
+ }
+ entry += num;
+ npages -= num;
+ } while (npages != 0);
+ local_irq_restore(flags);
+}
+
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
- dma_addr_t dvma, struct dma_attrs *attrs)
+ dma_addr_t dvma, unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, order, npages, entry;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ unsigned long order, npages, entry;
+ unsigned long iotsb_num;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ if (!iommu_use_atu(iommu, dvma)) {
+ tbl = &iommu->tbl;
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ } else {
+ tbl = &atu->tbl;
+ iotsb_num = atu->iotsb->iotsb_num;
+ }
+ entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t sz,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
+ size_t sz, enum dma_data_direction direction,
+ unsigned long attrs)
{
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr;
- u32 bus_addr, ret;
- unsigned long prot;
+ unsigned long i, prot;
+ dma_addr_t bus_addr, ret;
long entry;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ /*
+ * This check is included because older versions of the code
+ * lacked MMIO path support, and my ability to test this path
+ * is limited. However, from a software technical standpoint,
+ * there is no restriction, as the following code operates
+ * solely on physical addresses.
+ */
+ goto bad;
+
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
- oaddr = (unsigned long)(page_address(page) + offset);
+ oaddr = (unsigned long)(phys_to_virt(phys));
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ mask = *dev->dma_mask;
+ if (!iommu_use_atu(iommu, mask))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
+ (unsigned long)(-1), 0);
- if (unlikely(entry == DMA_ERROR_CODE))
+ if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
- bus_addr = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
- for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = iommu_batch_add(base_paddr);
+ for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
+ long err = iommu_batch_add(phys, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -287,24 +425,24 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return DMA_MAPPING_ERROR;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, bus_addr, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- return DMA_ERROR_CODE;
+ local_irq_restore(flags);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+ return DMA_MAPPING_ERROR;
}
-static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, npages;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ unsigned long npages;
+ unsigned long iotsb_num;
long entry;
u32 devhandle;
@@ -316,32 +454,28 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, bus_addr, npages);
-
- entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ if (bus_addr <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
+ entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
@@ -350,6 +484,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long base_shift;
long err;
@@ -357,12 +494,16 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu = dev->archdata.iommu;
if (nelems == 0 || !iommu)
- return 0;
-
+ return -EINVAL;
+ atu = iommu->atu;
+
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
@@ -371,14 +512,21 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
- seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
+
+ mask = *dev->dma_mask;
+ if (!iommu_use_atu(iommu, mask))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -391,27 +539,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
- if (printk_ratelimit())
- printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
- " npages %lx\n", iommu, paddr, npages);
+ if (unlikely(entry == IOMMU_ERROR_CODE)) {
+ pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+ tbl, paddr, npages);
goto iommu_map_failed;
}
- iommu_batch_new_entry(entry);
+ iommu_batch_new_entry(entry, mask);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT);
+ dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
- err = iommu_batch_add(paddr);
+ err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
@@ -446,16 +593,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_next = dma_addr + slen;
}
- err = iommu_batch_end();
+ err = iommu_batch_end(mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
@@ -469,71 +615,89 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
+ iommu_tbl_range_free(tbl, vaddr, npages,
+ IOMMU_ERROR_CODE);
/* XXX demap? XXX */
- s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
- return 0;
+ return -EINVAL;
}
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags;
+ struct atu *atu;
+ unsigned long flags, entry;
+ unsigned long iotsb_num;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
- unsigned long npages, entry;
+ unsigned long npages;
+ struct iommu_map_table *tbl;
+ unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
- entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- while (npages) {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
+ if (dma_handle <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
}
-
+ entry = ((dma_handle - tbl->table_map_base) >> shift);
+ dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+ entry, npages);
+ iommu_tbl_range_free(tbl, dma_handle, npages,
+ IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
+}
+
+static int dma_4v_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+
+ if (ali_sound_dma_hack(dev, device_mask))
+ return 1;
+ if (device_mask < iommu->dma_addr_mask)
+ return 0;
+ return 1;
}
-static struct dma_map_ops sun4v_dma_ops = {
+static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
- .map_page = dma_4v_map_page,
- .unmap_page = dma_4v_unmap_page,
+ .map_phys = dma_4v_map_phys,
+ .unmap_phys = dma_4v_unmap_phys,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
+ .dma_supported = dma_4v_supported,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
@@ -550,33 +714,162 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct iommu *iommu)
+ struct iommu_map_table *iommu)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, cnt = 0;
+ struct iommu_pool *pool;
+ unsigned long i, pool_nr, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
- for (i = 0; i < arena->limit; i++) {
- unsigned long ret, io_attrs, ra;
-
- ret = pci_sun4v_iommu_getmap(devhandle,
- HV_PCI_TSBID(0, i),
- &io_attrs, &ra);
- if (ret == HV_EOK) {
- if (page_in_phys_avail(ra)) {
- pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, i), 1);
- } else {
- cnt++;
- __set_bit(i, arena->map);
+ for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+ pool = &(iommu->pools[pool_nr]);
+ for (i = pool->start; i <= pool->end; i++) {
+ unsigned long ret, io_attrs, ra;
+
+ ret = pci_sun4v_iommu_getmap(devhandle,
+ HV_PCI_TSBID(0, i),
+ &io_attrs, &ra);
+ if (ret == HV_EOK) {
+ if (page_in_phys_avail(ra)) {
+ pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0,
+ i), 1);
+ } else {
+ cnt++;
+ __set_bit(i, iommu->map);
+ }
}
}
}
-
return cnt;
}
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ struct atu_iotsb *iotsb;
+ void *table;
+ u64 table_size;
+ u64 iotsb_num;
+ unsigned long order;
+ unsigned long err;
+
+ iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+ if (!iotsb) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ atu->iotsb = iotsb;
+
+ /* calculate size of IOTSB */
+ table_size = (atu->size / IO_PAGE_SIZE) * 8;
+ order = get_order(table_size);
+ table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_failed;
+ }
+ iotsb->table = table;
+ iotsb->ra = __pa(table);
+ iotsb->dvma_size = atu->size;
+ iotsb->dvma_base = atu->base;
+ iotsb->table_size = table_size;
+ iotsb->page_size = IO_PAGE_SIZE;
+
+ /* configure and register IOTSB with HV */
+ err = pci_sun4v_iotsb_conf(pbm->devhandle,
+ iotsb->ra,
+ iotsb->table_size,
+ iotsb->page_size,
+ iotsb->dvma_base,
+ &iotsb_num);
+ if (err) {
+ pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+ iotsb->iotsb_num = iotsb_num;
+
+ err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+ if (err) {
+ pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+
+ return 0;
+
+iotsb_conf_failed:
+ free_pages((unsigned long)table, order);
+table_failed:
+ kfree(iotsb);
+out_err:
+ return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ unsigned long err;
+ const u64 *ranges;
+ u64 map_size, num_iotte;
+ u64 dma_mask;
+ const u32 *page_size;
+ int len;
+
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+ &len);
+ if (!ranges) {
+ pr_err(PFX "No iommu-address-ranges\n");
+ return -EINVAL;
+ }
+
+ page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+ NULL);
+ if (!page_size) {
+ pr_err(PFX "No iommu-pagesizes\n");
+ return -EINVAL;
+ }
+
+ /* There are 4 iommu-address-ranges supported. Each range is a pair of
+ * {base, size}. ranges[0] and ranges[1] describe the 32bit address
+ * space while ranges[2] and ranges[3] describe the 64bit space. We
+ * want to use the 64bit address ranges to support 64bit addressing.
+ * Because 'size' is the same for ranges[2] and ranges[3] we can select
+ * either of them for mapping. However, since that 'size' is too large
+ * for the OS to allocate an IOTSB for, we use a fixed size of 32G
+ * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
+ * to share.
+ */
+ atu->ranges = (struct atu_ranges *)ranges;
+ atu->base = atu->ranges[3].base;
+ atu->size = ATU_64_SPACE_SIZE;
+
+ /* Create IOTSB */
+ err = pci_sun4v_atu_alloc_iotsb(pbm);
+ if (err) {
+ pr_err(PFX "Error creating ATU IOTSB\n");
+ return err;
+ }
+
+ /* Create the ATU iommu map.
+ * One bit represents one iotte in the IOTSB table.
+ */
+ dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+ num_iotte = atu->size / IO_PAGE_SIZE;
+ map_size = num_iotte / 8;
+ atu->tbl.table_map_base = atu->base;
+ atu->dma_addr_mask = dma_mask;
+ atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+ if (!atu->tbl.map)
+ return -ENOMEM;
+
+ iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+
+ return 0;
+}
+
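Worked numbers for the sizes computed above, taking the fixed 32 GB window from the comment and assuming the usual 8 KB sparc64 IO page size: the IOTSB holds 4 M IOTTEs (32 MB of table, allocated with __get_free_pages()), and the allocator bitmap, where one bit tracks one IOTTE, is 512 KB. A tiny stand-alone check:

#include <stdio.h>

int main(void)
{
	/* 32 GB fixed window (ATU_64_SPACE_SIZE per the comment above),
	 * 8 KB IO pages assumed. */
	unsigned long long atu_size = 32ULL << 30;
	unsigned long long io_page_size = 8192;
	unsigned long long num_iotte = atu_size / io_page_size;
	unsigned long long table_size = num_iotte * 8;	/* 8 bytes per IOTTE */
	unsigned long long map_size = num_iotte / 8;	/* 1 bit per IOTTE */

	printf("IOTTEs = %llu, IOTSB = %llu MB, bitmap = %llu KB\n",
	       num_iotte, table_size >> 20, map_size >> 10);
	return 0;
}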
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -603,20 +896,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->tbl.map) {
printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
- iommu->arena.limit = num_tsb_entries;
-
- sz = probe_existing_entries(pbm, iommu);
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+ sz = probe_existing_entries(pbm, &iommu->tbl);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
@@ -912,6 +1207,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
pci_sun4v_scan_bus(pbm, &op->dev);
+ /* If atu_init fails it is not a complete failure;
+ * we can still continue using the legacy iommu.
+ */
+ if (pbm->iommu->atu) {
+ err = pci_sun4v_atu_init(pbm);
+ if (err) {
+ kfree(pbm->iommu->atu);
+ pbm->iommu->atu = NULL;
+ pr_err(PFX "ATU init failed, err=%d\n", err);
+ }
+ }
+
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
@@ -925,23 +1232,41 @@ static int pci_sun4v_probe(struct platform_device *op)
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
+ struct atu *atu;
u32 devhandle;
- int i, err;
+ int i, err = -ENODEV;
+ static bool hv_atu = true;
dp = op->dev.of_node;
if (!hvapi_negotiated++) {
- err = sun4v_hvapi_register(HV_GRP_PCI,
- vpci_major,
- &vpci_minor);
+ for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
+ vpci_major = vpci_versions[i].major;
+ vpci_minor = vpci_versions[i].minor;
+
+ err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
+ &vpci_minor);
+ if (!err)
+ break;
+ }
if (err) {
- printk(KERN_ERR PFX "Could not register hvapi, "
- "err=%d\n", err);
+ pr_err(PFX "Could not register hvapi, err=%d\n", err);
return err;
}
- printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
- vpci_major, vpci_minor);
+ pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
+ vpci_major, vpci_minor);
+
+ err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+ if (err) {
+ /* don't return an error if we fail to register the
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+ }
dma_ops = &sun4v_dma_ops;
}
@@ -980,6 +1305,14 @@ static int pci_sun4v_probe(struct platform_device *op)
}
pbm->iommu = iommu;
+ iommu->atu = NULL;
+ if (hv_atu) {
+ atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+ if (!atu)
+ pr_err(PFX "Could not allocate atu\n");
+ else
+ iommu->atu = atu;
+ }
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
@@ -990,6 +1323,7 @@ static int pci_sun4v_probe(struct platform_device *op)
return 0;
out_free_iommu:
+ kfree(iommu->atu);
kfree(pbm->iommu);
out_free_controller:
@@ -1010,7 +1344,6 @@ static const struct of_device_id pci_sun4v_match[] = {
static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = pci_sun4v_match,
},
.probe = pci_sun4v_probe,
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 8e9fc3a5b4f5..d47263a9901a 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* pci_sun4v.h: SUN4V specific PCI controller support.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
@@ -6,87 +7,108 @@
#ifndef _PCI_SUN4V_H
#define _PCI_SUN4V_H
-extern long pci_sun4v_iommu_map(unsigned long devhandle,
- unsigned long tsbid,
- unsigned long num_ttes,
- unsigned long io_attributes,
- unsigned long io_page_list_pa);
-extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
- unsigned long tsbid,
- unsigned long num_ttes);
-extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
- unsigned long tsbid,
- unsigned long *io_attributes,
- unsigned long *real_address);
-extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
- unsigned long pci_device,
- unsigned long config_offset,
- unsigned long size);
-extern int pci_sun4v_config_put(unsigned long devhandle,
- unsigned long pci_device,
- unsigned long config_offset,
- unsigned long size,
- unsigned long data);
+long pci_sun4v_iommu_map(unsigned long devhandle,
+ unsigned long tsbid,
+ unsigned long num_ttes,
+ unsigned long io_attributes,
+ unsigned long io_page_list_pa);
+unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
+ unsigned long tsbid,
+ unsigned long num_ttes);
+unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
+ unsigned long tsbid,
+ unsigned long *io_attributes,
+ unsigned long *real_address);
+unsigned long pci_sun4v_config_get(unsigned long devhandle,
+ unsigned long pci_device,
+ unsigned long config_offset,
+ unsigned long size);
+int pci_sun4v_config_put(unsigned long devhandle,
+ unsigned long pci_device,
+ unsigned long config_offset,
+ unsigned long size,
+ unsigned long data);
-extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
+unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
unsigned long msiqid,
unsigned long msiq_paddr,
unsigned long num_entries);
-extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long *msiq_paddr,
- unsigned long *num_entries);
-extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long *valid);
-extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long valid);
-extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long *state);
-extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long state);
-extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long *head);
-extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long head);
-extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
- unsigned long msiqid,
- unsigned long *head);
-extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
- unsigned long msinum,
- unsigned long *valid);
-extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
- unsigned long msinum,
- unsigned long valid);
-extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
- unsigned long msinum,
- unsigned long *msiq);
-extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
- unsigned long msinum,
- unsigned long msiq,
- unsigned long msitype);
-extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
- unsigned long msinum,
- unsigned long *state);
-extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
- unsigned long msinum,
- unsigned long state);
-extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
- unsigned long msinum,
- unsigned long *msiq);
-extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
- unsigned long msinum,
- unsigned long msiq);
-extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
- unsigned long msinum,
- unsigned long *valid);
-extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
- unsigned long msinum,
- unsigned long valid);
+unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long *msiq_paddr,
+ unsigned long *num_entries);
+unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long *valid);
+unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long valid);
+unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long *state);
+unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long state);
+unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long *head);
+unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long head);
+unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
+ unsigned long msiqid,
+ unsigned long *head);
+unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long *valid);
+unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long valid);
+unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long *msiq);
+unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long msiq,
+ unsigned long msitype);
+unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long *state);
+unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long state);
+unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long *msiq);
+unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long msiq);
+unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long *valid);
+unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
+ unsigned long msinum,
+ unsigned long valid);
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+ unsigned long ra,
+ unsigned long table_size,
+ unsigned long page_size,
+ unsigned long dvma_base,
+ u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index_iottes,
+ unsigned long io_attributes,
+ unsigned long io_page_list_pa,
+ long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index,
+ unsigned long iottes,
+ unsigned long *demapped);
#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..2b8051871a15 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* pci_sun4v_asm: Hypervisor calls for PCI support.
*
* Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
@@ -360,3 +361,71 @@ ENTRY(pci_sun4v_msg_setvalid)
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
+ /*
+ * %o0: devhandle
+ * %o1: r_addr
+ * %o2: size
+ * %o3: pagesize
+ * %o4: virt
+ * %o5: &iotsb_num/&iotsb_handle
+ *
+ * returns %o0: status
+ * %o1: iotsb_num/iotsb_handle
+ */
+ENTRY(pci_sun4v_iotsb_conf)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_CONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: pci_device
+ *
+ * returns %o0: status
+ */
+ENTRY(pci_sun4v_iotsb_bind)
+ mov HV_FAST_PCI_IOTSB_BIND, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: index_count
+ * %o3: iotte_attributes
+ * %o4: io_page_list_p
+ * %o5: &mapped
+ *
+ * returns %o0: status
+ * %o1: #mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_MAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: iotsb_index
+ * %o3: #iottes
+ * %o4: &demapped
+ *
+ * returns %o0: status
+ * %o1: #demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+ mov HV_FAST_PCI_IOTSB_DEMAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
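The register comments above map one-to-one onto the C prototypes added in pci_sun4v.h: the C arguments arrive in %o0-%o5 per the SPARC ABI, the wrapper saves any result pointer, loads the hcall number into %o5 and traps, then returns the %o0 status while storing the secondary %o1 result through the saved pointer. A minimal caller-side sketch (placeholder arguments; mirrors how pci_sun4v_atu_alloc_iotsb() uses it earlier in this patch):

#include <linux/types.h>
#include <asm/hypervisor.h>	/* HV_EOK */
#include "pci_sun4v.h"		/* prototypes added earlier in this patch */

/* All arguments are placeholders supplied by the caller. */
static unsigned long example_iotsb_conf(unsigned long devhandle, unsigned long ra,
					unsigned long table_size,
					unsigned long page_size,
					unsigned long dvma_base, u64 *iotsb_num)
{
	unsigned long err;

	/* devhandle..dvma_base travel in %o0-%o4; the wrapper parks the
	 * result pointer, loads HV_FAST_PCI_IOTSB_CONF into %o5 and traps.
	 */
	err = pci_sun4v_iotsb_conf(devhandle, ra, table_size, page_size,
				   dvma_base, iotsb_num);
	if (err != HV_EOK)	/* status returned in %o0 */
		return err;

	/* The handle returned in %o1 has been stored through iotsb_num. */
	return HV_EOK;
}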
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 09f4fdd8d808..d7c911724435 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pcic.c: MicroSPARC-IIep PCI controller support
*
@@ -15,6 +16,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/swift.h> /* for cache flushing. */
@@ -33,9 +35,10 @@
#include <asm/pcic.h>
#include <asm/timex.h>
#include <asm/timer.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq_regs.h>
+#include "kernel.h"
#include "irq.h"
/*
@@ -162,8 +165,8 @@ static int pcic0_up;
static struct linux_pcic pcic0;
void __iomem *pcic_regs;
-volatile int pcic_speculative;
-volatile int pcic_trapped;
+static volatile int pcic_speculative;
+static volatile int pcic_trapped;
/* forward */
unsigned int pcic_build_device_irq(struct platform_device *op,
@@ -329,7 +332,7 @@ int __init pcic_probe(void)
pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
if ((pcic->pcic_config_space_addr =
- ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
+ ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) {
prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Address.\n");
prom_halt();
@@ -341,7 +344,7 @@ int __init pcic_probe(void)
*/
pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
if ((pcic->pcic_config_space_data =
- ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
+ ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) {
prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Data.\n");
prom_halt();
@@ -350,10 +353,9 @@ int __init pcic_probe(void)
pbm = &pcic->pbm;
pbm->prom_node = node;
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
- strcpy(pbm->prom_name, namebuf);
+ strscpy(pbm->prom_name, namebuf);
{
- extern volatile int t_nmi[4];
extern int pcic_nmi_trap_patch[4];
t_nmi[0] = pcic_nmi_trap_patch[0];
@@ -391,12 +393,16 @@ static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
struct linux_pbm_info *pbm = &pcic->pbm;
pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
+ if (!pbm->pci_bus)
+ return;
+
#if 0 /* deadwood transplanted from sparc64 */
pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
pci_record_assignments(pbm, pbm->pci_bus);
pci_assign_unassigned(pbm, pbm->pci_bus);
pci_fixup_irq(pbm, pbm->pci_bus);
#endif
+ pci_bus_add_devices(pbm->pci_bus);
}
/*
@@ -472,7 +478,7 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
int j;
if (node == 0 || node == -1) {
- strcpy(namebuf, "???");
+ strscpy(namebuf, "???");
} else {
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
}
@@ -513,10 +519,10 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
* board in a PCI slot. We must remap it
* under 64K but it is not done yet. XXX
*/
- printk("PCIC: Skipping I/O space at 0x%lx, "
- "this will Oops if a driver attaches "
- "device '%s' at %02x:%02x)\n", address,
- namebuf, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: Skipping I/O space at "
+ "0x%lx, this will Oops if a driver "
+ "attaches device '%s'\n", address,
+ namebuf);
}
}
}
@@ -531,12 +537,12 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
char namebuf[64];
if (node == 0 || node == -1) {
- strcpy(namebuf, "???");
+ strscpy(namebuf, "???");
} else {
prom_getstring(node, "name", namebuf, sizeof(namebuf));
}
- if ((p = pcic->pcic_imap) == 0) {
+ if ((p = pcic->pcic_imap) == NULL) {
dev->irq = 0;
return;
}
@@ -546,8 +552,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
p++;
}
if (i >= pcic->pcic_imdim) {
- printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
- namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
+ pci_info(dev, "PCIC: device %s not found in %d\n", namebuf,
+ pcic->pcic_imdim);
dev->irq = 0;
return;
}
@@ -560,7 +566,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
real_irq = ivec >> ((i-4) << 2) & 0xF;
} else { /* Corrupted map */
- printk("PCIC: BAD PIN %d\n", i); for (;;) {}
+ pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {}
}
/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
@@ -569,10 +575,10 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
*/
if (real_irq == 0 || p->force) {
if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
- printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
+ pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
}
- printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
- p->irq, p->pin, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
+ p->pin);
real_irq = p->irq;
i = p->pin;
@@ -597,15 +603,13 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
- int i, has_io, has_mem;
- unsigned int cmd;
struct linux_pcic *pcic;
/* struct linux_pbm_info* pbm = &pcic->pbm; */
int node;
struct pcidev_cookie *pcp;
if (!pcic0_up) {
- printk("pcibios_fixup_bus: no PCIC\n");
+ pci_info(bus, "pcibios_fixup_bus: no PCIC\n");
return;
}
pcic = &pcic0;
@@ -614,44 +618,12 @@ void pcibios_fixup_bus(struct pci_bus *bus)
* Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
*/
if (bus->number != 0) {
- printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
+ pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n",
+ bus->number);
return;
}
list_for_each_entry(dev, &bus->devices, bus_list) {
-
- /*
- * Comment from i386 branch:
- * There are buggy BIOSes that forget to enable I/O and memory
- * access to PCI devices. We try to fix this, but we need to
- * be sure that the BIOS didn't forget to assign an address
- * to the device. [mj]
- * OBP is a case of such BIOS :-)
- */
- has_io = has_mem = 0;
- for(i=0; i<6; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO) {
- has_io = 1;
- } else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
- printk("PCIC: Enabling I/O for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_IO;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
- printk("PCIC: Enabling memory for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_MEMORY;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
-
node = pdev_to_pnode(&pcic->pbm, dev);
if(node == 0)
node = -1;
@@ -670,30 +642,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
}
-/*
- * pcic_pin_to_irq() is exported to bus probing code
- */
-unsigned int
-pcic_pin_to_irq(unsigned int pin, const char *name)
-{
- struct linux_pcic *pcic = &pcic0;
- unsigned int irq;
- unsigned int ivec;
-
- if (pin < 4) {
- ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
- irq = ivec >> (pin << 2) & 0xF;
- } else if (pin < 8) {
- ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
- irq = ivec >> ((pin-4) << 2) & 0xF;
- } else { /* Corrupted map */
- printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
- for (;;) {} /* XXX Cannot panic properly in case of PROLL */
- }
-/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
- return irq;
-}
-
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
@@ -766,24 +714,12 @@ static void watchdog_reset() {
}
#endif
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- return res->start;
-}
-
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
-{
- return 0;
-}
-
/*
* NMI
*/
void pcic_nmi(unsigned int pend, struct pt_regs *regs)
{
-
- pend = flip_dword(pend);
+ pend = swab32(pend);
if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
/*
@@ -875,82 +811,4 @@ void __init sun4m_pci_init_IRQ(void)
sparc_config.load_profile_irq = pcic_load_profile_irq;
}
-/*
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 1;
- outb(*(const char *)src, addr);
- src += 1;
- /* addr += 1; */
- }
-}
-EXPORT_SYMBOL(outsb);
-
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 2;
- outw(*(const short *)src, addr);
- src += 2;
- /* addr += 2; */
- }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
- while (count) {
- count -= 4;
- outl(*(const long *)src, addr);
- src += 4;
- /* addr += 4; */
- }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 1;
- *(unsigned char *)dst = inb(addr);
- dst += 1;
- /* addr += 1; */
- }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 2;
- *(unsigned short *)dst = inw(addr);
- dst += 2;
- /* addr += 2; */
- }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
- while (count) {
- count -= 4;
- /*
- * XXX I am sure we are in for an unaligned trap here.
- */
- *(unsigned long *)dst = inl(addr);
- dst += 4;
- /* addr += 4; */
- }
-}
-EXPORT_SYMBOL(insl);
-
subsys_initcall(pcic_init);
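Most of the pcic.c changes are mechanical hardenings: ioremap() results are compared against NULL rather than 0, raw printk() calls that hand-formatted bus/devfn move to pci_info() so the device prefix is generated automatically, and the home-grown flip_dword() gives way to the generic swab32(). A small sketch of the resulting idiom, with pdev standing in for a real struct pci_dev:

	#include <linux/pci.h>
	#include <linux/swab.h>

	/* Sketch only: pci_info() is dev_info() on &pdev->dev, so the
	 * "0000:bb:dd.f" prefix no longer has to be formatted by hand.
	 */
	static void example_report(struct pci_dev *pdev, u32 raw)
	{
		u32 swapped = swab32(raw);	/* what flip_dword() did by hand */

		pci_info(pdev, "interrupt select word: %#x\n", swapped);
	}
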
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 269af58497aa..2a12c86af956 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* pcr.c: Generic sparc64 performance counter infrastructure.
*
* Copyright (C) 2009 David S. Miller (davem@davemloft.net)
@@ -191,12 +192,66 @@ static const struct pcr_ops n4_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
+static u64 n5_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_t5_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void n5_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_t5_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops n5_pcr_ops = {
+ .read_pcr = n5_pcr_read,
+ .write_pcr = n5_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
+
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+ .read_pcr = m7_pcr_read,
+ .write_pcr = m7_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
+
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
static int __init register_perf_hsvc(void)
{
+ unsigned long hverror;
+
if (tlb_type == hypervisor) {
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
@@ -215,6 +270,14 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_VT_CPU;
break;
+ case SUN4V_CHIP_NIAGARA5:
+ perf_hsvc_group = HV_GRP_T5_CPU;
+ break;
+
+ case SUN4V_CHIP_SPARC_M7:
+ perf_hsvc_group = HV_GRP_M7_PERF;
+ break;
+
default:
return -ENODEV;
}
@@ -222,10 +285,12 @@ static int __init register_perf_hsvc(void)
perf_hsvc_major = 1;
perf_hsvc_minor = 0;
- if (sun4v_hvapi_register(perf_hsvc_group,
- perf_hsvc_major,
- &perf_hsvc_minor)) {
- printk("perfmon: Could not register hvapi.\n");
+ hverror = sun4v_hvapi_register(perf_hsvc_group,
+ perf_hsvc_major,
+ &perf_hsvc_minor);
+ if (hverror) {
+ pr_err("perfmon: Could not register hvapi(0x%lx).\n",
+ hverror);
return -ENODEV;
}
}
@@ -254,6 +319,14 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n4_pcr_ops;
break;
+ case SUN4V_CHIP_NIAGARA5:
+ pcr_ops = &n5_pcr_ops;
+ break;
+
+ case SUN4V_CHIP_SPARC_M7:
+ pcr_ops = &m7_pcr_ops;
+ break;
+
default:
ret = -ENODEV;
break;
@@ -286,7 +359,7 @@ int __init pcr_arch_init(void)
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
- /* fallthrough */
+ fallthrough;
default:
err = -ENODEV;
goto out_unregister;
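The new n5 and m7 pcr_ops reuse the Niagara-4 PIC accessors and differ only in which hypervisor service moves the PCR value, so consumers keep going through the pcr_ops table and never see the chip difference. A sketch of that consumer side, using only fields visible in the structures above:

	/* Sketch only: 'idx' is a counter index; the enable/disable masks come
	 * from the ops table so T5 and M7 share the same caller code.
	 */
	static void example_arm_counter(int idx)
	{
		u64 pcr = pcr_ops->read_pcr(idx);

		pcr_ops->write_pic(idx, 0);		/* clear the counter */
		pcr_ops->write_pcr(idx, pcr | pcr_ops->pcr_nmi_enable);
	}

	static void example_disarm_counter(int idx)
	{
		pcr_ops->write_pcr(idx, pcr_ops->pcr_nmi_disable);
	}
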
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b5c38faa4ead..cae4d33002a5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Performance event support for sparc64.
*
* Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
@@ -9,7 +10,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/perf_event.h>
@@ -21,8 +22,9 @@
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/sched/clock.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>
@@ -108,9 +110,9 @@ struct cpu_hw_events {
/* Enabled/disable state. */
int enabled;
- unsigned int group_flag;
+ unsigned int txn_flags;
};
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
* counter event. In particular it gives the encoding as well as
@@ -737,25 +739,9 @@ static void sparc_vt_write_pmc(int idx, u64 val)
{
u64 pcr;
- /* There seems to be an internal latch on the overflow event
- * on SPARC-T4 that prevents it from triggering unless you
- * update the PIC exactly as we do here. The requirement
- * seems to be that you have to turn off event counting in the
- * PCR around the PIC update.
- *
- * For example, after the following sequence:
- *
- * 1) set PIC to -1
- * 2) enable event counting and overflow reporting in PCR
- * 3) overflow triggers, softint 15 handler invoked
- * 4) clear OV bit in PCR
- * 5) write PIC to -1
- *
- * a subsequent overflow event will not trigger. This
- * sequence works on SPARC-T3 and previous chips.
- */
pcr = pcr_ops->read_pcr(idx);
- pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+ /* ensure ov and ntc are reset */
+ pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
pcr_ops->write_pic(idx, val & 0xffffffff);
@@ -792,6 +778,29 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
+static const struct sparc_pmu sparc_m7_pmu = {
+ .event_map = niagara4_event_map,
+ .cache_map = &niagara4_cache_map,
+ .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+ .read_pmc = sparc_vt_read_pmc,
+ .write_pmc = sparc_vt_write_pmc,
+ .upper_shift = 5,
+ .lower_shift = 5,
+ .event_mask = 0x7ff,
+ .user_bit = PCR_N4_UTRACE,
+ .priv_bit = PCR_N4_STRACE,
+
+ /* We explicitly don't support hypervisor tracing. */
+ .hv_bit = 0,
+
+ .irq_bit = PCR_N4_TOE,
+ .upper_nop = 0,
+ .lower_nop = 0,
+ .flags = 0,
+ .max_hw_events = 4,
+ .num_pcrs = 4,
+ .num_pic_regs = 4,
+};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
@@ -882,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
@@ -919,6 +932,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
+ if (cp->hw.state & PERF_HES_STOPPED)
+ cp->hw.state |= PERF_HES_ARCH;
}
}
}
@@ -951,16 +966,20 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[0] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
+ if (hwc->state & PERF_HES_ARCH) {
cpuc->pcr[0] |= nop_for_index(idx);
- else
+ } else {
cpuc->pcr[0] |= event_encoding(enc, idx);
+ hwc->state = 0;
+ }
}
out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
-/* On this PMU each PIC has it's own PCR control register. */
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
+/* On this PMU each PIC has its own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
int i;
@@ -972,20 +991,16 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
- u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
- sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
- enc = perf_event_get_enc(cpuc->events[i]);
- cpuc->pcr[idx] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
- cpuc->pcr[idx] |= nop_for_index(idx);
- else
- cpuc->pcr[idx] |= event_encoding(enc, idx);
+ if (cp->hw.state & PERF_HES_ARCH)
+ continue;
+
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
@@ -1013,7 +1028,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
static void sparc_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (cpuc->enabled)
@@ -1031,7 +1046,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
static void sparc_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (!cpuc->enabled)
@@ -1065,7 +1080,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
static void sparc_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (flags & PERF_EF_RELOAD) {
@@ -1076,11 +1091,13 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+ perf_event_update_userpage(event);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,12 +1113,11 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long flags;
int i;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
@@ -1127,13 +1143,12 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void sparc_pmu_read(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
struct hw_perf_event *hwc = &event->hw;
@@ -1145,7 +1160,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
stop_nmi_watchdog(NULL);
@@ -1153,7 +1168,7 @@ static void perf_stop_nmi_watchdog(void *unused)
cpuc->pcr[i] = pcr_ops->read_pcr(i);
}
-void perf_event_grab_pmc(void)
+static void perf_event_grab_pmc(void)
{
if (atomic_inc_not_zero(&active_events))
return;
@@ -1169,7 +1184,7 @@ void perf_event_grab_pmc(void)
mutex_unlock(&pmc_grab_mutex);
}
-void perf_event_release_pmc(void)
+static void perf_event_release_pmc(void)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
@@ -1341,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
events[n] = group->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
- list_for_each_entry(event, &group->sibling_list, group_entry) {
+ for_each_sibling_event(event, group) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
@@ -1356,12 +1371,11 @@ static int collect_events(struct perf_event *group, int max_count,
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
@@ -1371,16 +1385,16 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
- event->hw.state = PERF_HES_UPTODATE;
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(ef_flags & PERF_EF_START))
- event->hw.state |= PERF_HES_STOPPED;
+ event->hw.state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
- if (cpuc->group_flag & PERF_EVENT_TXN)
+ if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto nocheck;
if (check_excludes(cpuc->event, n0, 1))
@@ -1394,7 +1408,6 @@ nocheck:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -1496,12 +1509,17 @@ static int sparc_pmu_event_init(struct perf_event *event)
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void sparc_pmu_start_txn(struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+
+ WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
+
+ cpuhw->txn_flags = txn_flags;
+ if (txn_flags & ~PERF_PMU_TXN_ADD)
+ return;
perf_pmu_disable(pmu);
- cpuhw->group_flag |= PERF_EVENT_TXN;
}
/*
@@ -1511,9 +1529,16 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
*/
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ unsigned int txn_flags;
+
+ WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+
+ txn_flags = cpuhw->txn_flags;
+ cpuhw->txn_flags = 0;
+ if (txn_flags & ~PERF_PMU_TXN_ADD)
+ return;
- cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
@@ -1524,20 +1549,26 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
*/
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
+
+ if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
+ cpuc->txn_flags = 0;
+ return 0;
+ }
+
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n))
return -EAGAIN;
- cpuc->group_flag &= ~PERF_EVENT_TXN;
+ cpuc->txn_flags = 0;
perf_pmu_enable(pmu);
return 0;
}
@@ -1586,6 +1617,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
+ u64 finish_clock;
+ u64 start_clock;
int i;
if (!atomic_read(&active_events))
@@ -1599,9 +1632,11 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
return NOTIFY_DONE;
}
+ start_clock = sched_clock();
+
regs = args->regs;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/* If the PMU has the TOE IRQ enable bits, we need to do a
* dummy write to the %pcr to clear the overflow bits and thus
@@ -1633,10 +1668,13 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_event_overflow(event, &data, regs))
- sparc_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+
return NOTIFY_STOP;
}
@@ -1662,18 +1700,26 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara2_pmu;
return true;
}
- if (!strcmp(sparc_pmu_type, "niagara4")) {
+ if (!strcmp(sparc_pmu_type, "niagara4") ||
+ !strcmp(sparc_pmu_type, "niagara5")) {
sparc_pmu = &niagara4_pmu;
return true;
}
+ if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+ sparc_pmu = &sparc_m7_pmu;
+ return true;
+ }
return false;
}
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
{
+ int err;
+
pr_info("Performance events: ");
- if (!supported_pmu()) {
+ err = pcr_arch_init();
+ if (err || !supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return 0;
}
@@ -1685,9 +1731,9 @@ int __init init_hw_perf_events(void)
return 0;
}
-early_initcall(init_hw_perf_events);
+pure_initcall(init_hw_perf_events);
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ksp, fp;
@@ -1724,78 +1770,107 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = current->curr_ret_stack;
- if (current->ret_stack && index >= graph) {
- pc = current->ret_stack[index - graph].ret;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(current,
+ graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
perf_callchain_store(entry, pc);
graph++;
}
}
#endif
- } while (entry->nr < PERF_MAX_STACK_DEPTH);
+ } while (entry->nr < entry->max_stack);
+}
+
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+ /* addresses should be at least 4-byte aligned */
+ if (((unsigned long) fp) & 3)
+ return 0;
+
+ return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
- ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+ ufp = regs->u_regs[UREG_FP] + STACK_BIAS;
do {
- struct sparc_stackf *usf, sf;
+ struct sparc_stackf __user *usf;
+ struct sparc_stackf sf;
unsigned long pc;
- usf = (struct sparc_stackf *) ufp;
+ usf = (struct sparc_stackf __user *)ufp;
+ if (!valid_user_frame(usf, sizeof(sf)))
+ break;
+
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
perf_callchain_store(entry, pc);
- } while (entry->nr < PERF_MAX_STACK_DEPTH);
+ } while (entry->nr < entry->max_stack);
}
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
- ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+ ufp = regs->u_regs[UREG_FP] & 0xffffffffUL;
do {
unsigned long pc;
if (thread32_stack_is_64bit(ufp)) {
- struct sparc_stackf *usf, sf;
+ struct sparc_stackf __user *usf;
+ struct sparc_stackf sf;
ufp += STACK_BIAS;
- usf = (struct sparc_stackf *) ufp;
+ usf = (struct sparc_stackf __user *)ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc & 0xffffffff;
ufp = ((unsigned long) sf.fp) & 0xffffffff;
} else {
- struct sparc_stackf32 *usf, sf;
- usf = (struct sparc_stackf32 *) ufp;
+ struct sparc_stackf32 __user *usf;
+ struct sparc_stackf32 sf;
+ usf = (struct sparc_stackf32 __user *)ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
}
perf_callchain_store(entry, pc);
- } while (entry->nr < PERF_MAX_STACK_DEPTH);
+ } while (entry->nr < entry->max_stack);
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
+ u64 saved_fault_address = current_thread_info()->fault_address;
+ u8 saved_fault_code = get_thread_fault_code();
+
perf_callchain_store(entry, regs->tpc);
if (!current->mm)
return;
flushw_user();
+
+ pagefault_disable();
+
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
else
perf_callchain_user_64(entry, regs);
+
+ pagefault_enable();
+
+ set_thread_fault_code(saved_fault_code);
+ current_thread_info()->fault_address = saved_fault_address;
}
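The group_flag to txn_flags switch aligns this PMU with the generic perf transaction protocol: the core opens a transaction with a flags word, only PERF_PMU_TXN_ADD transactions defer the per-event schedulability test to commit time, and any other transaction kind is treated as a no-op here. A sketch of the sequence the core drives against the callbacks above when scheduling an event group:

	/* Sketch only: the perf core owns this loop; it is shown to make the
	 * start/add/commit hand-off of the sparc callbacks concrete.
	 */
	static int example_group_sched_in(struct pmu *pmu,
					  struct perf_event **ev, int n)
	{
		int i;

		pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	/* sparc_pmu_start_txn() */
		for (i = 0; i < n; i++) {
			if (pmu->add(ev[i], PERF_EF_START)) {
				pmu->cancel_txn(pmu);	/* sparc_pmu_cancel_txn() */
				return -EAGAIN;
			}
		}
		return pmu->commit_txn(pmu);	/* one constraint check per group */
	}
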
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 8b7297faca79..69a0206e56f0 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* pmc - Driver implementation for power management functions
* of Power Management Controller (PMC) on SPARCstation-Voyager.
*
@@ -10,12 +11,12 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/oplib.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/auxio.h>
#include <asm/processor.h>
@@ -71,7 +72,7 @@ static int pmc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id pmc_match[] = {
+static const struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
@@ -82,7 +83,6 @@ MODULE_DEVICE_TABLE(of, pmc_match);
static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
- .owner = THIS_MODULE,
.of_match_table = pmc_match,
},
.probe = pmc_probe,
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 4cb23c41553f..db8a3f9e3d40 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* power.c: Power management driver.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -8,7 +9,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -27,7 +29,7 @@ static int has_button_interrupt(unsigned int irq, struct device_node *dp)
{
if (irq == 0xffffffff)
return 0;
- if (!of_find_property(dp, "button", NULL))
+ if (!of_property_read_bool(dp, "button"))
return 0;
return 1;
@@ -40,8 +42,8 @@ static int power_probe(struct platform_device *op)
power_reg = of_ioremap(res, 0, 0x4, "power");
- printk(KERN_INFO "%s: Control reg at %llx\n",
- op->dev.of_node->name, res->start);
+ printk(KERN_INFO "%pOFn: Control reg at %llx\n",
+ op->dev.of_node, res->start);
if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq,
@@ -63,14 +65,8 @@ static struct platform_driver power_driver = {
.probe = power_probe,
.driver = {
.name = "power",
- .owner = THIS_MODULE,
.of_match_table = power_match,
},
};
-static int __init power_init(void)
-{
- return platform_driver_register(&power_driver);
-}
-
-device_initcall(power_init);
+builtin_platform_driver(power_driver);
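builtin_platform_driver(power_driver) generates the registration boilerplate that the deleted power_init()/device_initcall() pair spelled out by hand; it is usable here only because this driver can never be built as a module. Roughly, the macro expands to the equivalent of:

	/* Simplified expansion; the real definition lives in
	 * <linux/platform_device.h> via builtin_driver().
	 */
	static int __init power_driver_init(void)
	{
		return platform_driver_register(&power_driver);
	}
	device_initcall(power_driver_init);
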
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
new file mode 100644
index 000000000000..0442ab00518d
--- /dev/null
+++ b/arch/sparc/kernel/process.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This file handles the architecture-independent parts of process handling.
+ */
+
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/signal.h>
+
+#include "kernel.h"
+
+asmlinkage long sparc_fork(struct pt_regs *regs)
+{
+ unsigned long orig_i1 = regs->u_regs[UREG_I1];
+ long ret;
+ struct kernel_clone_args args = {
+ .exit_signal = SIGCHLD,
+ /* Reuse the parent's stack for the child. */
+ .stack = regs->u_regs[UREG_FP],
+ };
+
+ ret = kernel_clone(&args);
+
+ /* If we get an error and potentially restart the system
+ * call, we're screwed because copy_thread() clobbered
+ * the parent's %o1. So detect that case and restore it
+ * here.
+ */
+ if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
+ regs->u_regs[UREG_I1] = orig_i1;
+
+ return ret;
+}
+
+asmlinkage long sparc_vfork(struct pt_regs *regs)
+{
+ unsigned long orig_i1 = regs->u_regs[UREG_I1];
+ long ret;
+
+ struct kernel_clone_args args = {
+ .flags = CLONE_VFORK | CLONE_VM,
+ .exit_signal = SIGCHLD,
+ /* Reuse the parent's stack for the child. */
+ .stack = regs->u_regs[UREG_FP],
+ };
+
+ ret = kernel_clone(&args);
+
+ /* If we get an error and potentially restart the system
+ * call, we're screwed because copy_thread() clobbered
+ * the parent's %o1. So detect that case and restore it
+ * here.
+ */
+ if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
+ regs->u_regs[UREG_I1] = orig_i1;
+
+ return ret;
+}
+
+asmlinkage long sparc_clone(struct pt_regs *regs)
+{
+ unsigned long orig_i1 = regs->u_regs[UREG_I1];
+ unsigned int flags = lower_32_bits(regs->u_regs[UREG_I0]);
+ long ret;
+
+ struct kernel_clone_args args = {
+ .flags = (flags & ~CSIGNAL),
+ .exit_signal = (flags & CSIGNAL),
+ .tls = regs->u_regs[UREG_I3],
+ };
+
+#ifdef CONFIG_COMPAT
+ if (test_thread_flag(TIF_32BIT)) {
+ args.pidfd = compat_ptr(regs->u_regs[UREG_I2]);
+ args.child_tid = compat_ptr(regs->u_regs[UREG_I4]);
+ args.parent_tid = compat_ptr(regs->u_regs[UREG_I2]);
+ } else
+#endif
+ {
+ args.pidfd = (int __user *)regs->u_regs[UREG_I2];
+ args.child_tid = (int __user *)regs->u_regs[UREG_I4];
+ args.parent_tid = (int __user *)regs->u_regs[UREG_I2];
+ }
+
+ /* Did userspace set up a separate stack for the child, or are we
+ * reusing the parent's?
+ */
+ if (regs->u_regs[UREG_I1])
+ args.stack = regs->u_regs[UREG_I1];
+ else
+ args.stack = regs->u_regs[UREG_FP];
+
+ ret = kernel_clone(&args);
+
+ /* If we get an error and potentially restart the system
+ * call, we're screwed because copy_thread() clobbered
+ * the parent's %o1. So detect that case and restore it
+ * here.
+ */
+ if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
+ regs->u_regs[UREG_I1] = orig_i1;
+
+ return ret;
+}
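All three wrappers restore %o1 when kernel_clone() returns an error, so that a restarted system call re-enters with the original argument register intact. The cast-and-compare works because every errno this path can hand back, ordinary codes and the -ERESTART* family alike, has magnitude no larger than ERESTART_RESTARTBLOCK, so all of them land at the very top of the unsigned range; it is essentially the trick behind IS_ERR_VALUE(), with a tighter bound. A sketch of the test in isolation:

	/* Sketch only: shows why the unsigned comparison identifies every
	 * error return from kernel_clone() in this path.
	 */
	static inline bool example_is_error_return(long ret)
	{
		/* ret in [-ERESTART_RESTARTBLOCK, -1] maps to the top of the
		 * unsigned range; any valid PID maps far below it.
		 */
		return (unsigned long)ret >= (unsigned long)-ERESTART_RESTARTBLOCK;
	}
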
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fdd819dfdacf..5a28c0e91bf1 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
@@ -7,12 +8,13 @@
/*
* This file handles the architecture-dependent parts of process handling..
*/
-
-#include <stdarg.h>
-
+#include <linux/elfcore.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
@@ -22,15 +24,13 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/pm.h>
-#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/auxio.h>
#include <asm/oplib.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
@@ -39,6 +39,8 @@
#include <asm/unistd.h>
#include <asm/setup.h>
+#include "kernel.h"
+
/*
* Power management idle function
* Set in pm platform drivers (apc.c and pmc.c)
@@ -69,7 +71,6 @@ void arch_cpu_idle(void)
{
if (sparc_idle)
(*sparc_idle)();
- local_irq_enable();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
@@ -103,8 +104,12 @@ void machine_restart(char * cmd)
void machine_power_off(void)
{
if (auxio_power_register &&
- (strcmp(of_console_device->type, "serial") || scons_pwroff))
- *auxio_power_register |= AUXIO_POWER_OFF;
+ (!of_node_is_type(of_console_device, "serial") || scons_pwroff)) {
+ u8 power_register = sbus_readb(auxio_power_register);
+ power_register |= AUXIO_POWER_OFF;
+ sbus_writeb(power_register, auxio_power_register);
+ }
+
machine_halt();
}
@@ -134,10 +139,10 @@ void show_regs(struct pt_regs *r)
}
/*
- * The show_stack is an external API which we do not use ourselves.
+ * The show_stack() is external API which we do not use ourselves.
* The oops is printed in die_if_kernel.
*/
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long pc, fp;
unsigned long task_base;
@@ -159,39 +164,31 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
break;
rw = (struct reg_window32 *) fp;
pc = rw->ins[7];
- printk("[%08lx : ", pc);
- printk("%pS ] ", (void *) pc);
+ printk("%s[%08lx : ", loglvl, pc);
+ printk("%s%pS ] ", loglvl, (void *) pc);
fp = rw->ins[6];
} while (++count < 16);
- printk("\n");
-}
-
-/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return task_thread_info(tsk)->kpc;
+ printk("%s\n", loglvl);
}
/*
* Free current thread data structures etc..
*/
-void exit_thread(void)
+void exit_thread(struct task_struct *tsk)
{
#ifndef CONFIG_SMP
- if(last_task_used_math == current) {
+ if (last_task_used_math == tsk) {
#else
- if (test_thread_flag(TIF_USEDFPU)) {
+ if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) {
#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
- fpsave(&current->thread.float_regs[0], &current->thread.fsr,
- &current->thread.fpqueue[0], &current->thread.fpqdepth);
+ fpsave(&tsk->thread.float_regs[0], &tsk->thread.fsr,
+ &tsk->thread.fpqueue[0], &tsk->thread.fpqdepth);
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
- clear_thread_flag(TIF_USEDFPU);
+ clear_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU);
#endif
}
}
@@ -215,16 +212,6 @@ void flush_thread(void)
clear_thread_flag(TIF_USEDFPU);
#endif
}
-
- /* This task is no longer a kernel thread. */
- if (current->thread.flags & SPARC_FLAG_KTHREAD) {
- current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-
- /* We must fixup kregs as well. */
- /* XXX This was not fixed for ti for a while, worked. Unused? */
- current->thread.kregs = (struct pt_regs *)
- (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
- }
}
static inline struct sparc_stackf __user *
@@ -255,33 +242,6 @@ clone_stackframe(struct sparc_stackf __user *dst,
return sp;
}
-asmlinkage int sparc_do_fork(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size)
-{
- unsigned long parent_tid_ptr, child_tid_ptr;
- unsigned long orig_i1 = regs->u_regs[UREG_I1];
- long ret;
-
- parent_tid_ptr = regs->u_regs[UREG_I2];
- child_tid_ptr = regs->u_regs[UREG_I4];
-
- ret = do_fork(clone_flags, stack_start, stack_size,
- (int __user *) parent_tid_ptr,
- (int __user *) child_tid_ptr);
-
- /* If we get an error and potentially restart the system
- * call, we're screwed because copy_thread() clobbered
- * the parent's %o1. So detect that case and restore it
- * here.
- */
- if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
- regs->u_regs[UREG_I1] = orig_i1;
-
- return ret;
-}
-
/* Copy a Sparc thread. The fork() return value conventions
* under SunOS are nothing short of bletcherous:
* Parent --> %o0 == childs pid, %o1 == 0
@@ -298,9 +258,11 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags,
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ u64 clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
char *new_stack;
@@ -326,24 +288,22 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
/*
- * A new process must start with interrupts closed in 2.5,
- * because this is how Mingo's scheduler works (see schedule_tail
- * and finish_arch_switch). If we do not do it, a timer interrupt hits
- * before we unlock, attempts to re-take the rq->lock, and then we die.
- * Thus, kpsr|=PSR_PIL.
+ * A new process must start with interrupts disabled, see schedule_tail()
+ * and finish_task_switch(). (If we do not do it and if a timer interrupt
+ * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+ *
+ * Thus, kpsr |= PSR_PIL.
*/
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(args->fn)) {
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
- p->thread.flags |= SPARC_FLAG_KTHREAD;
- p->thread.current_ds = KERNEL_DS;
ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
- childregs->u_regs[UREG_G1] = sp; /* function */
- childregs->u_regs[UREG_G2] = arg;
+ childregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+ childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
psr = childregs->psr = get_psr();
ti->kpsr = psr | PSR_PIL;
ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
@@ -351,8 +311,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
}
memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
childregs->u_regs[UREG_FP] = sp;
- p->thread.flags &= ~SPARC_FLAG_KTHREAD;
- p->thread.current_ds = USER_DS;
ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
ti->kwim = current->thread.fork_kwim;
@@ -401,61 +359,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
regs->u_regs[UREG_I1] = 0;
if (clone_flags & CLONE_SETTLS)
- childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
+ childregs->u_regs[UREG_G7] = tls;
return 0;
}
-/*
- * fill in the fpu structure for a core dump.
- */
-int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
-{
- if (used_math()) {
- memset(fpregs, 0, sizeof(*fpregs));
- fpregs->pr_q_entrysize = 8;
- return 1;
- }
-#ifdef CONFIG_SMP
- if (test_thread_flag(TIF_USEDFPU)) {
- put_psr(get_psr() | PSR_EF);
- fpsave(&current->thread.float_regs[0], &current->thread.fsr,
- &current->thread.fpqueue[0], &current->thread.fpqdepth);
- if (regs != NULL) {
- regs->psr &= ~(PSR_EF);
- clear_thread_flag(TIF_USEDFPU);
- }
- }
-#else
- if (current == last_task_used_math) {
- put_psr(get_psr() | PSR_EF);
- fpsave(&current->thread.float_regs[0], &current->thread.fsr,
- &current->thread.fpqueue[0], &current->thread.fpqdepth);
- if (regs != NULL) {
- regs->psr &= ~(PSR_EF);
- last_task_used_math = NULL;
- }
- }
-#endif
- memcpy(&fpregs->pr_fr.pr_regs[0],
- &current->thread.float_regs[0],
- (sizeof(unsigned long) * 32));
- fpregs->pr_fsr = current->thread.fsr;
- fpregs->pr_qcnt = current->thread.fpqdepth;
- fpregs->pr_q_entrysize = 8;
- fpregs->pr_en = 1;
- if(fpregs->pr_qcnt != 0) {
- memcpy(&fpregs->pr_q[0],
- &current->thread.fpqueue[0],
- sizeof(struct fpq) * fpregs->pr_qcnt);
- }
- /* Zero out the rest. */
- memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
- sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
- return 1;
-}
-
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) task;
@@ -463,10 +372,6 @@ unsigned long get_wchan(struct task_struct *task)
struct reg_window32 *rw;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
- goto out;
-
fp = task_thread_info(task)->ksp + bias;
do {
/* Bogus frame pointer? */
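Among the process_32.c changes, machine_power_off() swaps a plain volatile pointer dereference of the AUXIO power register for the SBus accessors, so the access carries the proper I/O semantics instead of relying on a raw store. The read-modify-write shape of that pattern, with reg and bit standing in for auxio_power_register and AUXIO_POWER_OFF:

	#include <asm/io.h>

	/* Sketch only: 'reg' and 'bit' are placeholders for
	 * auxio_power_register and AUXIO_POWER_OFF.
	 */
	static void example_set_mmio_bit(u8 __iomem *reg, u8 bit)
	{
		u8 val = sbus_readb(reg);	/* fetch current contents */

		val |= bit;			/* set the requested bit */
		sbus_writeb(val, reg);		/* write it back */
	}
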
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index baebab215492..25781923788a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
@@ -8,12 +9,12 @@
/*
* This file handles the architecture-dependent parts of process handling..
*/
-
-#include <stdarg.h>
-
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
@@ -31,11 +32,12 @@
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
@@ -60,6 +62,8 @@ void arch_cpu_idle(void)
} else {
unsigned long pstate;
+ raw_local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -70,8 +74,13 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
- if (!need_resched() && !cpu_is_offline(smp_processor_id()))
+ if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
sun4v_cpu_yield();
+ /* If resumed by cpu_poke then we need to explicitly
+ * call scheduler_ipi().
+ */
+ scheduler_poke();
+ }
/* Re-enable interrupts. */
__asm__ __volatile__(
@@ -80,12 +89,13 @@ void arch_cpu_idle(void)
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
+
+ raw_local_irq_disable();
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead()
+void __noreturn arch_cpu_idle_dead(void)
{
sched_preempt_enable_no_resched();
cpu_play_dead();
@@ -97,18 +107,13 @@ static void show_regwindow32(struct pt_regs *regs)
{
struct reg_window32 __user *rw;
struct reg_window32 r_w;
- mm_segment_t old_fs;
__asm__ __volatile__ ("flushw");
- rw = compat_ptr((unsigned)regs->u_regs[14]);
- old_fs = get_fs();
- set_fs (USER_DS);
+ rw = compat_ptr((unsigned int)regs->u_regs[14]);
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
- set_fs (old_fs);
return;
}
- set_fs (old_fs);
printk("l0: %08x l1: %08x l2: %08x l3: %08x "
"l4: %08x l5: %08x l6: %08x l7: %08x\n",
r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
@@ -127,7 +132,6 @@ static void show_regwindow(struct pt_regs *regs)
struct reg_window __user *rw;
struct reg_window *rwk;
struct reg_window r_w;
- mm_segment_t old_fs;
if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
__asm__ __volatile__ ("flushw");
@@ -136,14 +140,10 @@ static void show_regwindow(struct pt_regs *regs)
rwk = (struct reg_window *)
(regs->u_regs[14] + STACK_BIAS);
if (!(regs->tstate & TSTATE_PRIV)) {
- old_fs = get_fs();
- set_fs (USER_DS);
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
- set_fs (old_fs);
return;
}
rwk = &r_w;
- set_fs (old_fs);
}
} else {
show_regwindow32(regs);
@@ -182,7 +182,7 @@ void show_regs(struct pt_regs *regs)
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
@@ -236,7 +236,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
}
}
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
@@ -248,16 +248,22 @@ void arch_trigger_all_cpu_backtrace(void)
spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
- memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
-
this_cpu = raw_smp_processor_id();
- __global_reg_self(tp, regs, this_cpu);
+ memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+ if (cpumask_test_cpu(this_cpu, mask) && this_cpu != exclude_cpu)
+ __global_reg_self(tp, regs, this_cpu);
smp_fetch_global_regs();
- for_each_online_cpu(cpu) {
- struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
+ for_each_cpu(cpu, mask) {
+ struct global_reg_snapshot *gp;
+
+ if (cpu == exclude_cpu)
+ continue;
+
+ gp = &global_cpu_snapshot[cpu].reg;
__global_reg_poll(gp);
@@ -278,6 +284,8 @@ void arch_trigger_all_cpu_backtrace(void)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -287,12 +295,12 @@ void arch_trigger_all_cpu_backtrace(void)
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_globreg(int key)
+static void sysrq_handle_globreg(u8 key)
{
- arch_trigger_all_cpu_backtrace();
+ trigger_all_cpu_backtrace();
}
-static struct sysrq_key_op sparc_globalreg_op = {
+static const struct sysrq_key_op sparc_globalreg_op = {
.handler = sysrq_handle_globreg,
.help_msg = "global-regs(y)",
.action_msg = "Show Global CPU Regs",
@@ -303,6 +311,9 @@ static void __global_pmu_self(int this_cpu)
struct global_pmu_snapshot *pp;
int i, num;
+ if (!pcr_ops)
+ return;
+
pp = &global_cpu_snapshot[this_cpu].pmu;
num = 1;
@@ -350,6 +361,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -357,12 +370,12 @@ static void pmu_snapshot_all_cpus(void)
spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
-static void sysrq_handle_globpmu(int key)
+static void sysrq_handle_globpmu(u8 key)
{
pmu_snapshot_all_cpus();
}
-static struct sysrq_key_op sparc_globalpmu_op = {
+static const struct sysrq_key_op sparc_globalpmu_op = {
.handler = sysrq_handle_globpmu,
.help_msg = "global-pmu(x)",
.action_msg = "Show Global PMU Regs",
@@ -381,29 +394,10 @@ core_initcall(sparc_sysrq_init);
#endif
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- unsigned long ret = 0xdeadbeefUL;
-
- if (ti && ti->ksp) {
- unsigned long *sp;
- sp = (unsigned long *)(ti->ksp + STACK_BIAS);
- if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
- sp[14]) {
- unsigned long *fp;
- fp = (unsigned long *)(sp[14] + STACK_BIAS);
- if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
- ret = fp[15];
- }
- }
- return ret;
-}
-
/* Free current thread data structures etc.. */
-void exit_thread(void)
+void exit_thread(struct task_struct *tsk)
{
- struct thread_info *t = current_thread_info();
+ struct thread_info *t = task_thread_info(tsk);
if (t->utraps) {
if (t->utraps[0] < 2)
@@ -452,7 +446,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
distance = fp - psp;
rval = (csp - distance);
- if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
+ if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance))
rval = 0;
else if (!stack_64bit) {
if (put_user(((u32)csp),
@@ -512,17 +506,15 @@ void synchronize_user_stack(void)
static void stack_unaligned(unsigned long sp)
{
- siginfo_t info;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *) sp;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp);
}
-void fault_in_user_windows(void)
+static const char uwfault32[] = KERN_INFO \
+ "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
+static const char uwfault64[] = KERN_INFO \
+ "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
+
+void fault_in_user_windows(struct pt_regs *regs)
{
struct thread_info *t = current_thread_info();
unsigned long window;
@@ -535,9 +527,9 @@ void fault_in_user_windows(void)
do {
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
- unsigned long sp;
+ unsigned long sp, orig_sp;
- sp = t->rwbuf_stkptrs[window];
+ orig_sp = sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
@@ -548,8 +540,16 @@ void fault_in_user_windows(void)
stack_unaligned(sp);
if (unlikely(copy_to_user((char __user *)sp,
- rwin, winsize)))
+ rwin, winsize))) {
+ if (show_unhandled_signals)
+ printk_ratelimited(is_compat_task() ?
+ uwfault32 : uwfault64,
+ current->comm, current->pid,
+ sp, orig_sp,
+ regs->tpc,
+ regs->u_regs[UREG_I7]);
goto barf;
+ }
} while (window--);
}
set_thread_wsaved(0);
@@ -557,41 +557,7 @@ void fault_in_user_windows(void)
barf:
set_thread_wsaved(window + 1);
- do_exit(SIGILL);
-}
-
-asmlinkage long sparc_do_fork(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size)
-{
- int __user *parent_tid_ptr, *child_tid_ptr;
- unsigned long orig_i1 = regs->u_regs[UREG_I1];
- long ret;
-
-#ifdef CONFIG_COMPAT
- if (test_thread_flag(TIF_32BIT)) {
- parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
- child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
- } else
-#endif
- {
- parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
- child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
- }
-
- ret = do_fork(clone_flags, stack_start, stack_size,
- parent_tid_ptr, child_tid_ptr);
-
- /* If we get an error and potentially restart the system
- * call, we're screwed because copy_thread() clobbered
- * the parent's %o1. So detect that case and restore it
- * here.
- */
- if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
- regs->u_regs[UREG_I1] = orig_i1;
-
- return ret;
+ force_sig(SIGSEGV);
}
/* Copy a Sparc thread. The fork() return value conventions
@@ -599,9 +565,11 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags,
* Parent --> %o0 == childs pid, %o1 == 0
* Child --> %o0 == parents pid, %o1 == 1
*/
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ u64 clone_flags = args->flags;
+ unsigned long sp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *t = task_thread_info(p);
struct pt_regs *regs = current_pt_regs();
struct sparc_stackf *parent_sf;
@@ -619,13 +587,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(args->fn)) {
memset(child_trap_frame, 0, child_stack_sz);
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(current_pt_regs()->tstate + 1) & TSTATE_CWP;
- t->current_ds = ASI_P;
- t->kregs->u_regs[UREG_G1] = sp; /* function */
- t->kregs->u_regs[UREG_G2] = arg;
+ t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+ t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
return 0;
}
@@ -638,7 +605,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
t->kregs->u_regs[UREG_FP] = sp;
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(regs->tstate + 1) & TSTATE_CWP;
- t->current_ds = ASI_AIUS;
if (sp != regs->u_regs[UREG_FP]) {
unsigned long csp;
@@ -658,78 +624,37 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
regs->u_regs[UREG_I1] = 0;
if (clone_flags & CLONE_SETTLS)
- t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
+ t->kregs->u_regs[UREG_G7] = tls;
return 0;
}
-typedef struct {
- union {
- unsigned int pr_regs[32];
- unsigned long pr_dregs[16];
- } pr_fr;
- unsigned int __unused;
- unsigned int pr_fsr;
- unsigned char pr_qcnt;
- unsigned char pr_q_entrysize;
- unsigned char pr_en;
- unsigned int pr_q[64];
-} elf_fpregset_t32;
-
-/*
- * fill in the fpu structure for a core dump.
+/* TIF_MCDPER in thread info flags for current task is updated lazily upon
+ * a context switch. Update this flag in current task's thread flags
+ * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
*/
-int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- unsigned long *kfpregs = current_thread_info()->fpregs;
- unsigned long fprs = current_thread_info()->fpsaved[0];
+ if (adi_capable()) {
+ register unsigned long tmp_mcdper;
- if (test_thread_flag(TIF_32BIT)) {
- elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
-
- if (fprs & FPRS_DL)
- memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
- sizeof(unsigned int) * 32);
- else
- memset(&fpregs32->pr_fr.pr_regs[0], 0,
- sizeof(unsigned int) * 32);
- fpregs32->pr_qcnt = 0;
- fpregs32->pr_q_entrysize = 8;
- memset(&fpregs32->pr_q[0], 0,
- (sizeof(unsigned int) * 64));
- if (fprs & FPRS_FEF) {
- fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
- fpregs32->pr_en = 1;
- } else {
- fpregs32->pr_fsr = 0;
- fpregs32->pr_en = 0;
- }
- } else {
- if(fprs & FPRS_DL)
- memcpy(&fpregs->pr_regs[0], kfpregs,
- sizeof(unsigned int) * 32);
- else
- memset(&fpregs->pr_regs[0], 0,
- sizeof(unsigned int) * 32);
- if(fprs & FPRS_DU)
- memcpy(&fpregs->pr_regs[16], kfpregs+16,
- sizeof(unsigned int) * 32);
+ __asm__ __volatile__(
+ ".word 0x83438000\n\t" /* rd %mcdper, %g1 */
+ "mov %%g1, %0\n\t"
+ : "=r" (tmp_mcdper)
+ :
+ : "g1");
+ if (tmp_mcdper)
+ set_thread_flag(TIF_MCDPER);
else
- memset(&fpregs->pr_regs[16], 0,
- sizeof(unsigned int) * 32);
- if(fprs & FPRS_FEF) {
- fpregs->pr_fsr = current_thread_info()->xfsr[0];
- fpregs->pr_gsr = current_thread_info()->gsr[0];
- } else {
- fpregs->pr_fsr = fpregs->pr_gsr = 0;
- }
- fpregs->pr_fprs = fprs;
+ clear_thread_flag(TIF_MCDPER);
}
- return 1;
+
+ *dst = *src;
+ return 0;
}
-EXPORT_SYMBOL(dump_fpu);
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
struct thread_info *tp;
@@ -737,10 +662,6 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ret = 0;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
- goto out;
-
tp = task_thread_info(task);
bias = STACK_BIAS;
fp = task_thread_info(task)->ksp + bias;
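For reference, a minimal sketch of how the generic clone path fills in the kernel_clone_args consumed by the new copy_thread() prototype above (illustrative only; kernel_clone() is normally reached through kernel_thread()/user_mode_thread() rather than called directly). args->fn is non-NULL for in-kernel threads, which is why the PF_KTHREAD test becomes a test on args->fn, and args->stack/args->tls replace the old sp/tls arguments for user threads.

#include <linux/sched.h>
#include <linux/sched/task.h>

static int demo_kthread_fn(void *arg)           /* hypothetical payload */
{
        return 0;
}

static pid_t demo_spawn_kthread(void)
{
        struct kernel_clone_args args = {
                .flags  = CLONE_VM | CLONE_UNTRACED,
                .fn     = demo_kthread_fn,      /* copy_thread() sees args->fn != NULL...   */
                .fn_arg = NULL,                 /* ...and stashes fn/fn_arg in %g1/%g2 here */
        };

        return kernel_clone(&args);
}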
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index cf5fe1c0b024..26a1cca7c761 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -1,10 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PROM_H
#define __PROM_H
#include <linux/spinlock.h>
#include <asm/prom.h>
-extern void of_console_init(void);
+void of_console_init(void);
extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index b51cbb9e87dc..cd94f1e8d644 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for creating, accessing and interpreting the device tree.
*
@@ -8,18 +9,13 @@
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc32 by David S. Miller davem@davemloft.net
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/oplib.h>
@@ -32,9 +28,7 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
- if (ret != NULL)
- memset(ret, 0, size);
+ ret = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
prom_early_allocated += size;
@@ -60,6 +54,7 @@ void * __init prom_early_alloc(unsigned long size)
*/
static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *rprop;
@@ -69,13 +64,14 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io, regs->phys_addr);
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -85,7 +81,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io,
regs->phys_addr);
}
@@ -93,6 +89,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
@@ -105,12 +102,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
- dp->name,
+ name,
devfn >> 3);
}
}
@@ -118,6 +115,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -128,15 +126,17 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io, regs->phys_addr);
}
-/* "name:vendor:device@irq,addrlo" */
+/* "name@irq,addrlo" */
static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct amba_prom_registers *regs;
- unsigned int *intr, *device, *vendor, reg0;
+ unsigned int *intr;
+ unsigned int reg0;
struct property *prop;
int interrupt = 0;
@@ -158,18 +158,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
else
intr = prop->value;
- prop = of_find_property(dp, "vendor", NULL);
- if (!prop)
- return;
- vendor = prop->value;
- prop = of_find_property(dp, "device", NULL);
- if (!prop)
- return;
- device = prop->value;
-
- sprintf(tmp_buf, "%s:%d:%d@%x,%x",
- dp->name, *vendor, *device,
- *intr, reg0);
+ sprintf(tmp_buf, "%s@%x,%x", name, *intr, reg0);
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
@@ -177,14 +166,14 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
struct device_node *parent = dp->parent;
if (parent != NULL) {
- if (!strcmp(parent->type, "pci") ||
- !strcmp(parent->type, "pciex"))
+ if (of_node_is_type(parent, "pci") ||
+ of_node_is_type(parent, "pciex"))
return pci_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "sbus"))
+ if (of_node_is_type(parent, "sbus"))
return sbus_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "ebus"))
+ if (of_node_is_type(parent, "ebus"))
return ebus_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "ambapp"))
+ if (of_node_is_type(parent, "ambapp"))
return ambapp_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
@@ -196,15 +185,18 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
char * __init build_path_component(struct device_node *dp)
{
+ const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
+ size_t n_sz;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ strscpy(tmp_buf, name);
- n = prom_early_alloc(strlen(tmp_buf) + 1);
- strcpy(n, tmp_buf);
+ n_sz = strlen(tmp_buf) + 1;
+ n = prom_early_alloc(n_sz);
+ strscpy(n, tmp_buf, n_sz);
return n;
}
@@ -214,13 +206,14 @@ extern void restore_current(void);
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
+ const size_t of_console_path_sz = 256;
struct device_node *dp;
unsigned long flags;
const char *type;
phandle node;
int skip, tmp, fd;
- of_console_path = prom_early_alloc(256);
+ of_console_path = prom_early_alloc(of_console_path_sz);
switch (prom_vers) {
case PROM_V0:
@@ -232,7 +225,7 @@ void __init of_console_init(void)
case PROMDEV_TTYB:
skip = 1;
- /* FALLTHRU */
+ fallthrough;
case PROMDEV_TTYA:
type = "serial";
@@ -255,7 +248,7 @@ void __init of_console_init(void)
}
of_console_device = dp;
- strcpy(of_console_path, dp->full_name);
+ sprintf(of_console_path, "%pOF", dp);
if (!strcmp(type, "serial")) {
strcat(of_console_path,
(skip ? ":b" : ":a"));
@@ -278,15 +271,9 @@ void __init of_console_init(void)
prom_halt();
}
dp = of_find_node_by_phandle(node);
- type = of_get_property(dp, "device_type", NULL);
-
- if (!type) {
- prom_printf("Console stdout lacks "
- "device_type property.\n");
- prom_halt();
- }
- if (strcmp(type, "display") && strcmp(type, "serial")) {
+ if (!of_node_is_type(dp, "display") &&
+ !of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
@@ -295,7 +282,7 @@ void __init of_console_init(void)
of_console_device = dp;
if (prom_vers == PROM_V2) {
- strcpy(of_console_path, dp->full_name);
+ sprintf(of_console_path, "%pOF", dp);
switch (*romvec->pv_stdout) {
case PROMDEV_TTYA:
strcat(of_console_path, ":a");
@@ -313,7 +300,7 @@ void __init of_console_init(void)
prom_printf("No stdout-path in root node.\n");
prom_halt();
}
- strcpy(of_console_path, path);
+ strscpy(of_console_path, path, of_console_path_sz);
}
break;
}
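For reference, the dp->full_name copies above are replaced by the %pOF vsprintf extension, which renders a device_node's full path on demand. A minimal sketch of the same idea (hypothetical helper, not part of this patch):

#include <linux/of.h>
#include <linux/printk.h>

static void demo_show_stdout_node(const struct device_node *dp)
{
        /* prints e.g. "OF stdout device is: /pci@1f,0/ebus@1/se@14,400000" */
        pr_info("OF stdout device is: %pOF\n", dp);
}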
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index d397d7fc5c28..aa4799cbb9c1 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for creating, accessing and interpreting the device tree.
*
@@ -8,18 +9,14 @@
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc64 by David S. Miller davem@davemloft.net
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
+#include <linux/memblock.h>
#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
#include <linux/mm.h>
-#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/prom.h>
@@ -33,16 +30,13 @@
void * __init prom_early_alloc(unsigned long size)
{
- unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
- void *ret;
+ void *ret = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!paddr) {
+ if (!ret) {
prom_printf("prom_early_alloc(%lu) failed\n", size);
prom_halt();
}
- ret = __va(paddr);
- memset(ret, 0, size);
prom_early_allocated += size;
return ret;
@@ -71,6 +65,7 @@ void * __init prom_early_alloc(unsigned long size)
*/
static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *rprop;
u32 high_bits, low_bits, type;
@@ -82,7 +77,7 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -97,21 +92,22 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
if (low_bits)
sprintf(tmp_buf, "%s@%s%x,%x",
- dp->name, prefix,
+ name, prefix,
high_bits, low_bits);
else
sprintf(tmp_buf, "%s@%s%x",
- dp->name,
+ name,
prefix,
high_bits);
} else if (type == 12) {
sprintf(tmp_buf, "%s@%x",
- dp->name, high_bits);
+ name, high_bits);
}
}
static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -122,7 +118,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -138,7 +134,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
mask = 0x7fffff;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
*(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask));
}
@@ -147,6 +143,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -156,7 +153,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io,
regs->phys_addr);
}
@@ -164,6 +161,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
@@ -176,12 +174,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
- dp->name,
+ name,
devfn >> 3);
}
}
@@ -189,6 +187,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
/* "name@UPA_PORTID,offset" */
static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -203,7 +202,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
return;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
*(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -211,6 +210,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
/* "name@reg" */
static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -220,12 +220,13 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
- sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+ sprintf(tmp_buf, "%s@%x", name, *regs);
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -236,7 +237,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -244,6 +245,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@bus,addr" */
static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -257,12 +259,13 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
* property of the i2c bus node etc. etc.
*/
sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
/* "name@reg0[,reg1]" */
static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -274,16 +277,17 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
if (prop->length == sizeof(u32) || regs[1] == 1) {
sprintf(tmp_buf, "%s@%x",
- dp->name, regs[0]);
+ name, regs[0]);
} else {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
}
/* "name@reg0reg1[,reg2reg3]" */
static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -295,10 +299,10 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
if (regs[2] || regs[3]) {
sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
- dp->name, regs[0], regs[1], regs[2], regs[3]);
+ name, regs[0], regs[1], regs[2], regs[3]);
} else {
sprintf(tmp_buf, "%s@%08x%08x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
}
@@ -307,37 +311,37 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
struct device_node *parent = dp->parent;
if (parent != NULL) {
- if (!strcmp(parent->type, "pci") ||
- !strcmp(parent->type, "pciex")) {
+ if (of_node_is_type(parent, "pci") ||
+ of_node_is_type(parent, "pciex")) {
pci_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "sbus")) {
+ if (of_node_is_type(parent, "sbus")) {
sbus_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "upa")) {
+ if (of_node_is_type(parent, "upa")) {
upa_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "ebus")) {
+ if (of_node_is_type(parent, "ebus")) {
ebus_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->name, "usb") ||
- !strcmp(parent->name, "hub")) {
+ if (of_node_name_eq(parent, "usb") ||
+ of_node_name_eq(parent, "hub")) {
usb_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "i2c")) {
+ if (of_node_is_type(parent, "i2c")) {
i2c_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "firewire")) {
+ if (of_node_is_type(parent, "firewire")) {
ieee1394_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "virtual-devices")) {
+ if (of_node_is_type(parent, "virtual-devices")) {
vdev_path_component(dp, tmp_buf);
return;
}
@@ -355,15 +359,18 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
char * __init build_path_component(struct device_node *dp)
{
+ const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
+ size_t n_sz;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ strscpy(tmp_buf, name);
- n = prom_early_alloc(strlen(tmp_buf) + 1);
- strcpy(n, tmp_buf);
+ n_sz = strlen(tmp_buf) + 1;
+ n = prom_early_alloc(n_sz);
+ strscpy(n, tmp_buf, n_sz);
return n;
}
@@ -373,6 +380,59 @@ static const char *get_mid_prop(void)
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
+bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread)
+{
+ const char *mid_prop = get_mid_prop();
+ int this_cpu_id;
+
+ /* On hypervisor based platforms we interrogate the 'reg'
+ * property. On everything else we look for a 'upa-portid',
+ * 'portid', or 'cpuid' property.
+ */
+
+ if (tlb_type == hypervisor) {
+ struct property *prop = of_find_property(cpun, "reg", NULL);
+ u32 *regs;
+
+ if (!prop) {
+ pr_warn("CPU node missing reg property\n");
+ return false;
+ }
+ regs = prop->value;
+ this_cpu_id = regs[0] & 0x0fffffff;
+ } else {
+ this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
+
+ if (this_cpu_id < 0) {
+ mid_prop = "cpuid";
+ this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
+ }
+ if (this_cpu_id < 0) {
+ pr_warn("CPU node missing cpu ID property\n");
+ return false;
+ }
+ }
+ if (this_cpu_id == cpu) {
+ if (thread) {
+ int proc_id = cpu_data(cpu).proc_id;
+
+ /* On sparc64, the cpu thread information is obtained
+ * either from OBP or the machine description. We've
+ * actually probed this information already long before
+ * this interface gets called so instead of interrogating
+ * both the OF node and the MDESC again, just use what
+ * we discovered already.
+ */
+ if (proc_id < 0)
+ proc_id = 0;
+ *thread = proc_id;
+ }
+ return true;
+ }
+ return false;
+}
+
static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
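The comment above spells out the lookup order; as a minimal sketch, the non-hypervisor branch boils down to a two-step of_getintprop_default() probe (hypothetical helper name, mirroring the code in this hunk):

#include <asm/prom.h>

static int demo_cpu_id(struct device_node *cpun, const char *mid_prop)
{
        int id = of_getintprop_default(cpun, mid_prop, -1);

        if (id < 0)                             /* no upa-portid/portid property */
                id = of_getintprop_default(cpun, "cpuid", -1);
        return id;                              /* still < 0: no usable cpu id   */
}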
@@ -425,7 +485,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
ncpus_probed++;
#ifdef CONFIG_SMP
set_cpu_present(cpuid, true);
- set_cpu_possible(cpuid, true);
+
+ if (num_possible_cpus() < nr_cpu_ids)
+ set_cpu_possible(cpuid, true);
#endif
return NULL;
}
@@ -444,7 +506,7 @@ static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
struct device_node *portid_parent = NULL;
int portid = -1;
- if (of_find_property(dp, "cpuid", NULL)) {
+ if (of_property_present(dp, "cpuid")) {
int limit = 2;
portid_parent = dp;
@@ -502,9 +564,6 @@ static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
cpu_data(cpuid).core_id = portid + 1;
cpu_data(cpuid).proc_id = portid;
-#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
-#endif
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
@@ -543,7 +602,6 @@ void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
- const char *type;
phandle node;
of_console_path = prom_early_alloc(256);
@@ -566,13 +624,8 @@ void __init of_console_init(void)
}
dp = of_find_node_by_phandle(node);
- type = of_get_property(dp, "device_type", NULL);
- if (!type) {
- prom_printf("Console stdout lacks device_type property.\n");
- prom_halt();
- }
- if (strcmp(type, "display") && strcmp(type, "serial")) {
+ if (!of_node_is_type(dp, "display") && !of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
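The of_node_is_type()/of_node_name_eq() helpers used throughout these hunks are NULL-tolerant, so the raw strcmp() on dp->type/dp->name (which could dereference a missing property) goes away. A minimal sketch (hypothetical helper, not part of this patch):

#include <linux/of.h>

static bool demo_parent_is_pci(const struct device_node *dp)
{
        const struct device_node *parent = dp ? dp->parent : NULL;

        /* safe even if parent is NULL or lacks a device_type property */
        return of_node_is_type(parent, "pci") ||
               of_node_is_type(parent, "pciex");
}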
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 79cc0d1a477d..d258fd10db01 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* prom_common.c: OF device tree support common code.
*
* Paul Mackerras August 1996.
@@ -7,11 +8,6 @@
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc by David S. Miller davem@davemloft.net
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -124,11 +120,14 @@ EXPORT_SYMBOL(of_find_in_proplist);
*/
static int __init handle_nextprop_quirks(char *buf, const char *name)
{
- if (!name || strlen(name) == 0)
+ size_t name_len;
+
+ name_len = name ? strlen(name) : 0;
+ if (name_len == 0)
return -1;
#ifdef CONFIG_SPARC32
- strcpy(buf, name);
+ strscpy(buf, name, name_len + 1);
#endif
return 0;
}
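The strcpy()-to-strscpy() conversions in these prom files rely on strscpy() always NUL-terminating the destination and reporting truncation. A minimal sketch of that contract (hypothetical helper, not part of this patch):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void demo_copy_name(char *dst, size_t dst_sz, const char *name)
{
        ssize_t n = strscpy(dst, name, dst_sz); /* copies at most dst_sz - 1 chars */

        if (n == -E2BIG)                        /* source did not fit */
                pr_warn("name \"%s\" truncated\n", name);
}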
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index 40e4936bd479..5752bfd73ac0 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -1,8 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/oplib.h>
#include <asm/prom.h>
@@ -192,7 +194,7 @@ static int sabre_device_needs_wsync(struct device_node *dp)
* the DMA synchronization handling
*/
while (parent) {
- if (!strcmp(parent->type, "pci"))
+ if (of_node_is_type(parent, "pci"))
break;
parent = parent->parent;
}
@@ -392,7 +394,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
iclr = schizo_ino_to_iclr(pbm_regs, ino);
/* On Schizo, no inofixup occurs. This is because each
- * INO has it's own IMAP register. On Psycho and Sabre
+ * INO has its own IMAP register. On Psycho and Sabre
* there is only one IMAP register for each PCI slot even
* though four different INOs can be generated by each
* PCI slot.
@@ -724,11 +726,11 @@ static unsigned int central_build_irq(struct device_node *dp,
unsigned long imap, iclr;
u32 tmp;
- if (!strcmp(dp->name, "eeprom")) {
+ if (of_node_name_eq(dp, "eeprom")) {
res = &central_op->resource[5];
- } else if (!strcmp(dp->name, "zs")) {
+ } else if (of_node_name_eq(dp, "zs")) {
res = &central_op->resource[4];
- } else if (!strcmp(dp->name, "clock-board")) {
+ } else if (of_node_name_eq(dp, "clock-board")) {
res = &central_op->resource[3];
} else {
return ino;
@@ -823,19 +825,19 @@ void __init irq_trans_init(struct device_node *dp)
}
#endif
#ifdef CONFIG_SBUS
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi")) {
+ if (of_node_name_eq(dp, "sbus") ||
+ of_node_name_eq(dp, "sbi")) {
sbus_irq_trans_init(dp);
return;
}
#endif
- if (!strcmp(dp->name, "fhc") &&
- !strcmp(dp->parent->name, "central")) {
+ if (of_node_name_eq(dp, "fhc") &&
+ of_node_name_eq(dp->parent, "central")) {
central_irq_trans_init(dp);
return;
}
- if (!strcmp(dp->name, "virtual-devices") ||
- !strcmp(dp->name, "niu")) {
+ if (of_node_name_eq(dp, "virtual-devices") ||
+ of_node_name_eq(dp, "niu")) {
sun4v_vdev_irq_trans_init(dp);
return;
}
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 8db48e808ed4..4557ef18f371 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -1,9 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/* psycho_common.c: Code common to PSYCHO and derivative PCI controllers.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/numa.h>
+#include <linux/platform_device.h>
#include <asm/upa.h>
@@ -47,7 +50,7 @@ static void psycho_check_stc_error(struct pci_pbm_info *pbm)
spin_lock(&stc_buf_lock);
/* This is __REALLY__ dangerous. When we put the streaming
- * buffer into diagnostic mode to probe it's tags and error
+ * buffer into diagnostic mode to probe its tags and error
* status, we _must_ clear all of the line tag valid bits
* before re-enabling the streaming buffer. If any dirty data
* lives in the STC when we do this, we will end up
@@ -453,7 +456,7 @@ void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op
struct device_node *dp = op->dev.of_node;
pbm->name = dp->full_name;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
diff --git a/arch/sparc/kernel/psycho_common.h b/arch/sparc/kernel/psycho_common.h
index 590b4ed8ab5e..6925231c50e4 100644
--- a/arch/sparc/kernel/psycho_common.h
+++ b/arch/sparc/kernel/psycho_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PSYCHO_COMMON_H
#define _PSYCHO_COMMON_H
@@ -30,19 +31,19 @@ enum psycho_error_type {
UE_ERR, CE_ERR, PCI_ERR
};
-extern void psycho_check_iommu_error(struct pci_pbm_info *pbm,
- unsigned long afsr,
- unsigned long afar,
- enum psycho_error_type type);
+void psycho_check_iommu_error(struct pci_pbm_info *pbm,
+ unsigned long afsr,
+ unsigned long afar,
+ enum psycho_error_type type);
-extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
+irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
-extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
- u32 dvma_offset, u32 dma_mask,
- unsigned long write_complete_offset);
+int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
+ u32 dvma_offset, u32 dma_mask,
+ unsigned long write_complete_offset);
-extern void psycho_pbm_init_common(struct pci_pbm_info *pbm,
- struct platform_device *op,
- const char *chip_name, int chip_type);
+void psycho_pbm_init_common(struct pci_pbm_info *pbm,
+ struct platform_device *op,
+ const char *chip_name, int chip_type);
#endif /* _PSYCHO_COMMON_H */
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 896ba7c5cd8e..c56333975fb1 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ptrace.c: Sparc process tracing support.
*
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
@@ -20,12 +21,12 @@
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
-#include <linux/tracehook.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#include "kernel.h"
+
/* #define ALLOW_INIT_TRACING */
/*
@@ -43,82 +44,63 @@ enum sparc_regset {
REGSET_FP,
};
-static int genregs32_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+static int regwindow32_get(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
{
- const struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- unsigned long *k = kbuf;
- unsigned long __user *u = ubuf;
- unsigned long reg;
-
- if (target == current)
- flush_user_windows();
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
- pos /= sizeof(reg);
- count /= sizeof(reg);
+ if (target == current) {
+ if (copy_from_user(uregs, (void __user *)reg_window, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- *k++ = regs->u_regs[pos++];
+static int regwindow32_set(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
+ if (target == current) {
+ if (copy_to_user((void __user *)reg_window, uregs, size))
+ return -EFAULT;
} else {
- for (; count > 0 && pos < 16; count--) {
- if (put_user(regs->u_regs[pos++], u++))
- return -EFAULT;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, &reg_window[pos++]) ||
- put_user(reg, u++))
- return -EFAULT;
- }
- }
- while (count > 0) {
- switch (pos) {
- case 32: /* PSR */
- reg = regs->psr;
- break;
- case 33: /* PC */
- reg = regs->pc;
- break;
- case 34: /* NPC */
- reg = regs->npc;
- break;
- case 35: /* Y */
- reg = regs->y;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- reg = 0;
- break;
- default:
- goto finish;
- }
-
- if (kbuf)
- *k++ = reg;
- else if (put_user(reg, u++))
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE | FOLL_WRITE) != size)
return -EFAULT;
- pos++;
- count--;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
+ return 0;
+}
+
+static int genregs32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = target->thread.kregs;
+ u32 uregs[16];
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ if (target == current)
+ flush_user_windows();
+
+ membuf_write(&to, regs->u_regs, 16 * sizeof(u32));
+ if (!to.left)
+ return 0;
+ if (regwindow32_get(target, regs, uregs))
+ return -EFAULT;
+ membuf_write(&to, uregs, 16 * sizeof(u32));
+ membuf_store(&to, regs->psr);
+ membuf_store(&to, regs->pc);
+ membuf_store(&to, regs->npc);
+ membuf_store(&to, regs->y);
+ return membuf_zero(&to, 2 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
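genregs32_get() above is converted to the membuf flavour of ->regset_get(): the regset core hands in a struct membuf and the hook simply streams bytes into it, with no pos/count/kbuf/ubuf bookkeeping. A minimal sketch with a made-up three-word regset (illustrative only):

#include <linux/regset.h>

static int demo_regset_get(struct task_struct *target,
                           const struct user_regset *regset,
                           struct membuf to)
{
        u32 demo_regs[3] = { 0, 1, 2 };         /* hypothetical register block */

        membuf_write(&to, demo_regs, sizeof(demo_regs));
        return membuf_zero(&to, sizeof(u32));   /* zero-filled padding word    */
}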
@@ -127,126 +109,74 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- const unsigned long *k = kbuf;
- const unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ u32 psr;
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- regs->u_regs[pos++] = *k++;
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (put_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (get_user(reg, u++))
- return -EFAULT;
- regs->u_regs[pos++] = reg;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, u++) ||
- put_user(reg, &reg_window[pos++]))
- return -EFAULT;
- }
- }
- while (count > 0) {
- unsigned long psr;
-
- if (kbuf)
- reg = *k++;
- else if (get_user(reg, u++))
- return -EFAULT;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- switch (pos) {
- case 32: /* PSR */
- psr = regs->psr;
- psr &= ~(PSR_ICC | PSR_SYSCALL);
- psr |= (reg & (PSR_ICC | PSR_SYSCALL));
- regs->psr = psr;
- break;
- case 33: /* PC */
- regs->pc = reg;
- break;
- case 34: /* NPC */
- regs->npc = reg;
- break;
- case 35: /* Y */
- regs->y = reg;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- break;
- default:
- goto finish;
- }
-
- pos++;
- count--;
- }
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
+ if (regwindow32_get(target, regs, uregs))
+ return -EFAULT;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret)
+ return ret;
+ if (regwindow32_set(target, regs, uregs))
+ return -EFAULT;
+ if (!count)
+ return 0;
- return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &psr,
+ 32 * sizeof(u32), 33 * sizeof(u32));
+ if (ret)
+ return ret;
+ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+ (psr & (PSR_ICC | PSR_SYSCALL));
+ if (!count)
+ return 0;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 33 * sizeof(u32), 34 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->npc,
+ 34 * sizeof(u32), 35 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->y,
+ 35 * sizeof(u32), 36 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 36 * sizeof(u32),
+ 38 * sizeof(u32));
+ return 0;
}
static int fpregs32_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- const unsigned long *fpregs = target->thread.float_regs;
- int ret = 0;
-
#if 0
if (target == current)
save_and_clear_fpu();
#endif
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- fpregs,
- 0, 32 * sizeof(u32));
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 32 * sizeof(u32),
- 33 * sizeof(u32));
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fsr,
- 33 * sizeof(u32),
- 34 * sizeof(u32));
-
- if (!ret) {
- unsigned long val;
-
- val = (1 << 8) | (8 << 16);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &val,
- 34 * sizeof(u32),
- 35 * sizeof(u32));
- }
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 35 * sizeof(u32), -1);
-
- return ret;
+ membuf_write(&to, target->thread.float_regs, 32 * sizeof(u32));
+ membuf_zero(&to, sizeof(u32));
+ membuf_write(&to, &target->thread.fsr, sizeof(u32));
+ membuf_store(&to, (u32)((1 << 8) | (8 << 16)));
+ return membuf_zero(&to, 64 * sizeof(u32));
}
static int fpregs32_set(struct task_struct *target,
@@ -268,16 +198,14 @@ static int fpregs32_set(struct task_struct *target,
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
32 * sizeof(u32),
33 * sizeof(u32));
- if (!ret && count > 0) {
+ if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fsr,
33 * sizeof(u32),
34 * sizeof(u32));
- }
-
if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 34 * sizeof(u32), -1);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 34 * sizeof(u32), -1);
return ret;
}
@@ -290,10 +218,10 @@ static const struct user_regset sparc32_regsets[] = {
* PSR, PC, nPC, Y, WIM, TBR
*/
[REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = 38,
.size = sizeof(u32), .align = sizeof(u32),
- .get = genregs32_get, .set = genregs32_set
+ .regset_get = genregs32_get, .set = genregs32_set
},
/* Format is:
* F0 --> F31
@@ -306,13 +234,107 @@ static const struct user_regset sparc32_regsets[] = {
* FPU QUEUE (64 32-bit ints)
*/
[REGSET_FP] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = 99,
.size = sizeof(u32), .align = sizeof(u32),
- .get = fpregs32_get, .set = fpregs32_set
+ .regset_get = fpregs32_get, .set = fpregs32_set
},
};
+static int getregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = target->thread.kregs;
+
+ if (target == current)
+ flush_user_windows();
+
+ membuf_store(&to, regs->psr);
+ membuf_store(&to, regs->pc);
+ membuf_store(&to, regs->npc);
+ membuf_store(&to, regs->y);
+ return membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u32));
+}
+
+static int setregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = target->thread.kregs;
+ u32 v[4];
+ int ret;
+
+ if (target == current)
+ flush_user_windows();
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ v,
+ 0, 4 * sizeof(u32));
+ if (ret)
+ return ret;
+ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+ (v[0] & (PSR_ICC | PSR_SYSCALL));
+ regs->pc = v[1];
+ regs->npc = v[2];
+ regs->y = v[3];
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs + 1,
+ 4 * sizeof(u32) , 19 * sizeof(u32));
+}
+
+static int getfpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+#if 0
+ if (target == current)
+ save_and_clear_fpu();
+#endif
+ membuf_write(&to, &target->thread.float_regs, 32 * sizeof(u32));
+ membuf_write(&to, &target->thread.fsr, sizeof(u32));
+ return membuf_zero(&to, 35 * sizeof(u32));
+}
+
+static int setfpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long *fpregs = target->thread.float_regs;
+ int ret;
+
+#if 0
+ if (target == current)
+ save_and_clear_fpu();
+#endif
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+ if (ret)
+ return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fsr,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+}
+
+static const struct user_regset ptrace32_regsets[] = {
+ [REGSET_GENERAL] = {
+ .n = 19, .size = sizeof(u32),
+ .regset_get = getregs_get, .set = setregs_set,
+ },
+ [REGSET_FP] = {
+ .n = 68, .size = sizeof(u32),
+ .regset_get = getfpregs_get, .set = setfpregs_set,
+ },
+};
+
+static const struct user_regset_view ptrace32_view = {
+ .regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets)
+};
+
static const struct user_regset_view user_sparc32_view = {
.name = "sparc", .e_machine = EM_SPARC,
.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
@@ -340,74 +362,44 @@ long arch_ptrace(struct task_struct *child, long request,
{
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
void __user *addr2p;
- const struct user_regset_view *view;
struct pt_regs __user *pregs;
struct fps __user *fps;
int ret;
- view = task_user_regset_view(current);
addr2p = (void __user *) addr2;
pregs = (struct pt_regs __user *) addr;
fps = (struct fps __user *) addr;
switch(request) {
case PTRACE_GETREGS: {
- ret = copy_regset_to_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u32),
- 4 * sizeof(u32),
- &pregs->psr);
- if (!ret)
- copy_regset_to_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u32),
- 15 * sizeof(u32),
- &pregs->u_regs[0]);
+ ret = copy_regset_to_user(child, &ptrace32_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u32),
+ pregs);
break;
}
case PTRACE_SETREGS: {
- ret = copy_regset_from_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u32),
- 4 * sizeof(u32),
- &pregs->psr);
- if (!ret)
- copy_regset_from_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u32),
- 15 * sizeof(u32),
- &pregs->u_regs[0]);
+ ret = copy_regset_from_user(child, &ptrace32_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u32),
+ pregs);
break;
}
case PTRACE_GETFPREGS: {
- ret = copy_regset_to_user(child, view, REGSET_FP,
- 0 * sizeof(u32),
- 32 * sizeof(u32),
- &fps->regs[0]);
- if (!ret)
- ret = copy_regset_to_user(child, view, REGSET_FP,
- 33 * sizeof(u32),
- 1 * sizeof(u32),
- &fps->fsr);
-
- if (!ret) {
- if (__put_user(0, &fps->fpqd) ||
- __put_user(0, &fps->flags) ||
- __put_user(0, &fps->extra) ||
- clear_user(fps->fpq, sizeof(fps->fpq)))
- ret = -EFAULT;
- }
+ ret = copy_regset_to_user(child, &ptrace32_view,
+ REGSET_FP, 0,
+ 68 * sizeof(u32),
+ fps);
break;
}
case PTRACE_SETFPREGS: {
- ret = copy_regset_from_user(child, view, REGSET_FP,
- 0 * sizeof(u32),
- 32 * sizeof(u32),
- &fps->regs[0]);
- if (!ret)
- ret = copy_regset_from_user(child, view, REGSET_FP,
- 33 * sizeof(u32),
- 1 * sizeof(u32),
- &fps->fsr);
+ ret = copy_regset_from_user(child, &ptrace32_view,
+ REGSET_FP, 0,
+ 33 * sizeof(u32),
+ fps);
break;
}
@@ -447,9 +439,9 @@ asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p)
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (syscall_exit_p)
- tracehook_report_syscall_exit(regs, 0);
+ ptrace_report_syscall_exit(regs, 0);
else
- ret = tracehook_report_syscall_entry(regs);
+ ret = ptrace_report_syscall_entry(regs);
}
return ret;
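With the private ptrace32_view added above, the legacy PTRACE_GETREGS layout (psr, pc, npc, y, g1..o7) becomes a single regset copy instead of two offset-juggling calls into the ELF core-dump view, as arch_ptrace() now does. A minimal usage sketch (hypothetical wrapper, reusing the ptrace32_view and REGSET_GENERAL symbols from this file):

#include <linux/regset.h>

static int demo_old_getregs(struct task_struct *child, void __user *uregs)
{
        return copy_regset_to_user(child, &ptrace32_view, REGSET_GENERAL,
                                   0, 19 * sizeof(u32), uregs);
}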
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 7ff45e4ba681..9fc67fa9336f 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* ptrace.c: Sparc process tracing support.
*
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
@@ -12,8 +13,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
@@ -22,14 +25,13 @@
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
-#include <linux/tracehook.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
+#include <linux/context_tracking.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
@@ -44,6 +46,43 @@
/* #define ALLOW_INIT_TRACING */
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) \
+ {.name = n, .offset = (PT_V9_##r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME("g0", G0),
+ REG_OFFSET_NAME("g1", G1),
+ REG_OFFSET_NAME("g2", G2),
+ REG_OFFSET_NAME("g3", G3),
+ REG_OFFSET_NAME("g4", G4),
+ REG_OFFSET_NAME("g5", G5),
+ REG_OFFSET_NAME("g6", G6),
+ REG_OFFSET_NAME("g7", G7),
+
+ REG_OFFSET_NAME("i0", I0),
+ REG_OFFSET_NAME("i1", I1),
+ REG_OFFSET_NAME("i2", I2),
+ REG_OFFSET_NAME("i3", I3),
+ REG_OFFSET_NAME("i4", I4),
+ REG_OFFSET_NAME("i5", I5),
+ REG_OFFSET_NAME("i6", I6),
+ REG_OFFSET_NAME("i7", I7),
+
+ REG_OFFSET_NAME("tstate", TSTATE),
+ REG_OFFSET_NAME("pc", TPC),
+ REG_OFFSET_NAME("npc", TNPC),
+ REG_OFFSET_NAME("y", Y),
+ REG_OFFSET_NAME("lr", I7),
+
+ REG_OFFSET_END,
+};
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -116,6 +155,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
preempt_enable();
}
+EXPORT_SYMBOL_GPL(flush_ptrace_access);
static int get_from_target(struct task_struct *target, unsigned long uaddr,
void *kbuf, int len)
@@ -124,7 +164,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
if (copy_from_user(kbuf, (void __user *) uaddr, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE);
if (len2 != len)
return -EFAULT;
}
@@ -138,7 +179,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
if (copy_to_user((void __user *) uaddr, kbuf, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE | FOLL_WRITE);
if (len2 != len)
return -EFAULT;
}
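The old 0/1 write argument of access_process_vm() became a gup_flags bitmask, which is why the helpers above now pass FOLL_FORCE for a read and FOLL_FORCE | FOLL_WRITE for a write. A minimal sketch (hypothetical wrappers, not part of this patch):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int demo_peek(struct task_struct *child, unsigned long addr,
                     void *buf, int len)
{
        if (access_process_vm(child, addr, buf, len, FOLL_FORCE) != len)
                return -EFAULT;
        return 0;
}

static int demo_poke(struct task_struct *child, unsigned long addr,
                     void *buf, int len)
{
        if (access_process_vm(child, addr, buf, len,
                              FOLL_FORCE | FOLL_WRITE) != len)
                return -EFAULT;
        return 0;
}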
@@ -203,52 +245,23 @@ enum sparc_regset {
static int genregs64_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
- int ret;
+ struct reg_window window;
if (target == current)
flushw_user();
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs->u_regs,
- 0, 16 * sizeof(u64));
- if (!ret && count && pos < (32 * sizeof(u64))) {
- struct reg_window window;
-
- if (regwindow64_get(target, regs, &window))
- return -EFAULT;
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &window,
- 16 * sizeof(u64),
- 32 * sizeof(u64));
- }
-
- if (!ret) {
- /* TSTATE, TPC, TNPC */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &regs->tstate,
- 32 * sizeof(u64),
- 35 * sizeof(u64));
- }
-
- if (!ret) {
- unsigned long y = regs->y;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &y,
- 35 * sizeof(u64),
- 36 * sizeof(u64));
- }
-
- if (!ret) {
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 36 * sizeof(u64), -1);
-
- }
- return ret;
+ membuf_write(&to, regs->u_regs, 16 * sizeof(u64));
+ if (!to.left)
+ return 0;
+ if (regwindow64_get(target, regs, &window))
+ return -EFAULT;
+ membuf_write(&to, &window, 16 * sizeof(u64));
+ /* TSTATE, TPC, TNPC */
+ membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
+ return membuf_store(&to, (u64)regs->y);
}
static int genregs64_set(struct task_struct *target,
@@ -308,7 +321,7 @@ static int genregs64_set(struct task_struct *target,
}
if (!ret) {
- unsigned long y;
+ unsigned long y = regs->y;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
@@ -319,77 +332,40 @@ static int genregs64_set(struct task_struct *target,
}
if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 36 * sizeof(u64), -1);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 36 * sizeof(u64), -1);
return ret;
}
static int fpregs64_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- const unsigned long *fpregs = task_thread_info(target)->fpregs;
- unsigned long fprs, fsr, gsr;
- int ret;
+ struct thread_info *t = task_thread_info(target);
+ unsigned long fprs;
if (target == current)
save_and_clear_fpu();
- fprs = task_thread_info(target)->fpsaved[0];
+ fprs = t->fpsaved[0];
if (fprs & FPRS_DL)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- fpregs,
- 0, 16 * sizeof(u64));
+ membuf_write(&to, t->fpregs, 16 * sizeof(u64));
else
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 0,
- 16 * sizeof(u64));
-
- if (!ret) {
- if (fprs & FPRS_DU)
- ret = user_regset_copyout(&pos, &count,
- &kbuf, &ubuf,
- fpregs + 16,
- 16 * sizeof(u64),
- 32 * sizeof(u64));
- else
- ret = user_regset_copyout_zero(&pos, &count,
- &kbuf, &ubuf,
- 16 * sizeof(u64),
- 32 * sizeof(u64));
- }
+ membuf_zero(&to, 16 * sizeof(u64));
+ if (fprs & FPRS_DU)
+ membuf_write(&to, t->fpregs + 16, 16 * sizeof(u64));
+ else
+ membuf_zero(&to, 16 * sizeof(u64));
if (fprs & FPRS_FEF) {
- fsr = task_thread_info(target)->xfsr[0];
- gsr = task_thread_info(target)->gsr[0];
+ membuf_store(&to, t->xfsr[0]);
+ membuf_store(&to, t->gsr[0]);
} else {
- fsr = gsr = 0;
+ membuf_zero(&to, 2 * sizeof(u64));
}
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fsr,
- 32 * sizeof(u64),
- 33 * sizeof(u64));
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &gsr,
- 33 * sizeof(u64),
- 34 * sizeof(u64));
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fprs,
- 34 * sizeof(u64),
- 35 * sizeof(u64));
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 35 * sizeof(u64), -1);
-
- return ret;
+ return membuf_store(&to, fprs);
}
static int fpregs64_set(struct task_struct *target,
@@ -430,8 +406,8 @@ static int fpregs64_set(struct task_struct *target,
task_thread_info(target)->fpsaved[0] = fprs;
if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 35 * sizeof(u64), -1);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 35 * sizeof(u64), -1);
return ret;
}
@@ -444,10 +420,10 @@ static const struct user_regset sparc64_regsets[] = {
* TSTATE, TPC, TNPC, Y
*/
[REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = 36,
.size = sizeof(u64), .align = sizeof(u64),
- .get = genregs64_get, .set = genregs64_set
+ .regset_get = genregs64_get, .set = genregs64_set
},
/* Format is:
* F0 --> F63
@@ -456,13 +432,97 @@ static const struct user_regset sparc64_regsets[] = {
* FPRS
*/
[REGSET_FP] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = 35,
.size = sizeof(u64), .align = sizeof(u64),
- .get = fpregs64_get, .set = fpregs64_set
+ .regset_get = fpregs64_get, .set = fpregs64_set
},
};
+static int getregs64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+
+ if (target == current)
+ flushw_user();
+
+ membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u64));
+ membuf_store(&to, (u64)0);
+ membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
+ return membuf_store(&to, (u64)regs->y);
+}
+
+static int setregs64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ unsigned long y = regs->y;
+ unsigned long tstate;
+ int ret;
+
+ if (target == current)
+ flushw_user();
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs + 1,
+ 0 * sizeof(u64),
+ 15 * sizeof(u64));
+ if (ret)
+ return ret;
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 15 * sizeof(u64), 16 * sizeof(u64));
+ /* TSTATE */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &tstate,
+ 16 * sizeof(u64),
+ 17 * sizeof(u64));
+ if (ret)
+ return ret;
+ /* Only the condition codes and the "in syscall"
+ * state can be modified in the %tstate register.
+ */
+ tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+ regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+ regs->tstate |= tstate;
+
+ /* TPC, TNPC */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->tpc,
+ 17 * sizeof(u64),
+ 19 * sizeof(u64));
+ if (ret)
+ return ret;
+ /* Y */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &y,
+ 19 * sizeof(u64),
+ 20 * sizeof(u64));
+ if (!ret)
+ regs->y = y;
+ return ret;
+}
+
+static const struct user_regset ptrace64_regsets[] = {
+ /* Format is:
+ * G1 --> G7
+ * O0 --> O7
+ * 0
+ * TSTATE, TPC, TNPC, Y
+ */
+ [REGSET_GENERAL] = {
+ .n = 20, .size = sizeof(u64),
+ .regset_get = getregs64_get, .set = setregs64_set,
+ },
+};
+
+static const struct user_regset_view ptrace64_view = {
+ .regsets = ptrace64_regsets, .n = ARRAY_SIZE(ptrace64_regsets)
+};
+
static const struct user_regset_view user_sparc64_view = {
.name = "sparc64", .e_machine = EM_SPARCV9,
.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
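setregs64_set() above only lets a tracer modify the condition codes and the "in syscall" bit of %tstate; everything else in the saved register is preserved. The update rule as a minimal standalone sketch (hypothetical helper; the TSTATE_* masks are the sparc64 ones used in this hunk):

#include <asm/pstate.h>         /* TSTATE_ICC, TSTATE_XCC, TSTATE_SYSCALL */
#include <asm/ptrace.h>

static void demo_apply_user_tstate(struct pt_regs *regs, unsigned long new_tstate)
{
        const unsigned long editable = TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL;

        regs->tstate = (regs->tstate & ~editable) | (new_tstate & editable);
}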
@@ -471,111 +531,28 @@ static const struct user_regset_view user_sparc64_view = {
#ifdef CONFIG_COMPAT
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
- compat_ulong_t __user *reg_window;
- compat_ulong_t *k = kbuf;
- compat_ulong_t __user *u = ubuf;
- compat_ulong_t reg;
+ u32 uregs[16];
+ int i;
if (target == current)
flushw_user();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- *k++ = regs->u_regs[pos++];
-
- reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- if (target == current) {
- for (; count > 0 && pos < 32; count--) {
- if (get_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- &reg_window[pos],
- k, sizeof(*k), 0)
- != sizeof(*k))
- return -EFAULT;
- k++;
- pos++;
- }
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
- return -EFAULT;
- }
-
- reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- if (target == current) {
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, &reg_window[pos++]) ||
- put_user(reg, u++))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- &reg_window[pos],
- &reg, sizeof(reg), 0)
- != sizeof(reg))
- return -EFAULT;
- if (access_process_vm(target,
- (unsigned long) u,
- &reg, sizeof(reg), 1)
- != sizeof(reg))
- return -EFAULT;
- pos++;
- u++;
- }
- }
- }
- while (count > 0) {
- switch (pos) {
- case 32: /* PSR */
- reg = tstate_to_psr(regs->tstate);
- break;
- case 33: /* PC */
- reg = regs->tpc;
- break;
- case 34: /* NPC */
- reg = regs->tnpc;
- break;
- case 35: /* Y */
- reg = regs->y;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- reg = 0;
- break;
- default:
- goto finish;
- }
-
- if (kbuf)
- *k++ = reg;
- else if (put_user(reg, u++))
- return -EFAULT;
- pos++;
- count--;
- }
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
-
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ for (i = 0; i < 16; i++)
+ membuf_store(&to, (u32)regs->u_regs[i]);
+ if (!to.left)
+ return 0;
+ if (get_from_target(target, regs->u_regs[UREG_I6],
+ uregs, sizeof(uregs)))
+ return -EFAULT;
+ membuf_write(&to, uregs, 16 * sizeof(u32));
+ membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
+ membuf_store(&to, (u32)(regs->tpc));
+ membuf_store(&to, (u32)(regs->tnpc));
+ membuf_store(&to, (u32)(regs->y));
+ return membuf_zero(&to, 2 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -612,7 +589,8 @@ static int genregs32_set(struct task_struct *target,
(unsigned long)
&reg_window[pos],
(void *) k,
- sizeof(*k), 1)
+ sizeof(*k),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -636,16 +614,13 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- u,
- &reg, sizeof(reg), 0)
- != sizeof(reg))
+ if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- &reg, sizeof(reg), 1)
+ &reg, sizeof(reg),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
@@ -693,62 +668,31 @@ finish:
pos *= sizeof(reg);
count *= sizeof(reg);
- return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 38 * sizeof(reg), -1);
+ return 0;
}
static int fpregs32_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- const unsigned long *fpregs = task_thread_info(target)->fpregs;
- compat_ulong_t enabled;
- unsigned long fprs;
- compat_ulong_t fsr;
- int ret = 0;
+ struct thread_info *t = task_thread_info(target);
+ bool enabled;
if (target == current)
save_and_clear_fpu();
- fprs = task_thread_info(target)->fpsaved[0];
- if (fprs & FPRS_FEF) {
- fsr = task_thread_info(target)->xfsr[0];
- enabled = 1;
- } else {
- fsr = 0;
- enabled = 0;
- }
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- fpregs,
- 0, 32 * sizeof(u32));
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 32 * sizeof(u32),
- 33 * sizeof(u32));
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fsr,
- 33 * sizeof(u32),
- 34 * sizeof(u32));
-
- if (!ret) {
- compat_ulong_t val;
-
- val = (enabled << 8) | (8 << 16);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &val,
- 34 * sizeof(u32),
- 35 * sizeof(u32));
- }
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 35 * sizeof(u32), -1);
+ enabled = t->fpsaved[0] & FPRS_FEF;
- return ret;
+ membuf_write(&to, t->fpregs, 32 * sizeof(u32));
+ membuf_zero(&to, sizeof(u32));
+ if (enabled)
+ membuf_store(&to, (u32)t->xfsr[0]);
+ else
+ membuf_zero(&to, sizeof(u32));
+ membuf_store(&to, (u32)((enabled << 8) | (8 << 16)));
+ return membuf_zero(&to, 64 * sizeof(u32));
}
static int fpregs32_set(struct task_struct *target,
@@ -792,8 +736,8 @@ static int fpregs32_set(struct task_struct *target,
task_thread_info(target)->fpsaved[0] = fprs;
if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 34 * sizeof(u32), -1);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 34 * sizeof(u32), -1);
return ret;
}
@@ -806,10 +750,10 @@ static const struct user_regset sparc32_regsets[] = {
* PSR, PC, nPC, Y, WIM, TBR
*/
[REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = 38,
.size = sizeof(u32), .align = sizeof(u32),
- .get = genregs32_get, .set = genregs32_set
+ .regset_get = genregs32_get, .set = genregs32_set
},
/* Format is:
* F0 --> F31
@@ -822,11 +766,134 @@ static const struct user_regset sparc32_regsets[] = {
* FPU QUEUE (64 32-bit ints)
*/
[REGSET_FP] = {
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = 99,
.size = sizeof(u32), .align = sizeof(u32),
- .get = fpregs32_get, .set = fpregs32_set
+ .regset_get = fpregs32_get, .set = fpregs32_set
+ },
+};
+
+static int getregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int i;
+
+ if (target == current)
+ flushw_user();
+
+ membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
+ membuf_store(&to, (u32)(regs->tpc));
+ membuf_store(&to, (u32)(regs->tnpc));
+ membuf_store(&to, (u32)(regs->y));
+ for (i = 1; i < 16; i++)
+ membuf_store(&to, (u32)regs->u_regs[i]);
+ return to.left;
+}
+
+static int setregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ unsigned long tstate;
+ u32 uregs[19];
+ int i, ret;
+
+ if (target == current)
+ flushw_user();
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 0, 19 * sizeof(u32));
+ if (ret)
+ return ret;
+
+ tstate = regs->tstate;
+ tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
+ tstate |= psr_to_tstate_icc(uregs[0]);
+ if (uregs[0] & PSR_SYSCALL)
+ tstate |= TSTATE_SYSCALL;
+ regs->tstate = tstate;
+ regs->tpc = uregs[1];
+ regs->tnpc = uregs[2];
+ regs->y = uregs[3];
+
+ for (i = 1; i < 15; i++)
+ regs->u_regs[i] = uregs[3 + i];
+ return 0;
+}
+
+static int getfpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct thread_info *t = task_thread_info(target);
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ membuf_write(&to, t->fpregs, 32 * sizeof(u32));
+ if (t->fpsaved[0] & FPRS_FEF)
+ membuf_store(&to, (u32)t->xfsr[0]);
+ else
+ membuf_zero(&to, sizeof(u32));
+ return membuf_zero(&to, 35 * sizeof(u32));
+}
+
+static int setfpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long *fpregs = task_thread_info(target)->fpregs;
+ unsigned long fprs;
+ int ret;
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ fprs = task_thread_info(target)->fpsaved[0];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+ if (!ret) {
+ compat_ulong_t fsr;
+ unsigned long val;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fsr,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+ if (!ret) {
+ val = task_thread_info(target)->xfsr[0];
+ val &= 0xffffffff00000000UL;
+ val |= fsr;
+ task_thread_info(target)->xfsr[0] = val;
+ }
+ }
+
+ fprs |= (FPRS_FEF | FPRS_DL);
+ task_thread_info(target)->fpsaved[0] = fprs;
+ return ret;
+}
+
+static const struct user_regset ptrace32_regsets[] = {
+ [REGSET_GENERAL] = {
+ .n = 19, .size = sizeof(u32),
+ .regset_get = getregs_get, .set = setregs_set,
},
+ [REGSET_FP] = {
+ .n = 68, .size = sizeof(u32),
+ .regset_get = getfpregs_get, .set = setfpregs_set,
+ },
+};
+
+static const struct user_regset_view ptrace32_view = {
+ .regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets)
};
static const struct user_regset_view user_sparc32_view = {
@@ -860,7 +927,6 @@ struct compat_fps {
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
- const struct user_regset_view *view = task_user_regset_view(current);
compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs32 __user *pregs;
struct compat_fps __user *fps;
@@ -878,58 +944,31 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
case PTRACE_GETREGS:
- ret = copy_regset_to_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u32),
- 4 * sizeof(u32),
- &pregs->psr);
- if (!ret)
- ret = copy_regset_to_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u32),
- 15 * sizeof(u32),
- &pregs->u_regs[0]);
+ ret = copy_regset_to_user(child, &ptrace32_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u32),
+ pregs);
break;
case PTRACE_SETREGS:
- ret = copy_regset_from_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u32),
- 4 * sizeof(u32),
- &pregs->psr);
- if (!ret)
- ret = copy_regset_from_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u32),
- 15 * sizeof(u32),
- &pregs->u_regs[0]);
+ ret = copy_regset_from_user(child, &ptrace32_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u32),
+ pregs);
break;
case PTRACE_GETFPREGS:
- ret = copy_regset_to_user(child, view, REGSET_FP,
- 0 * sizeof(u32),
- 32 * sizeof(u32),
- &fps->regs[0]);
- if (!ret)
- ret = copy_regset_to_user(child, view, REGSET_FP,
- 33 * sizeof(u32),
- 1 * sizeof(u32),
- &fps->fsr);
- if (!ret) {
- if (__put_user(0, &fps->flags) ||
- __put_user(0, &fps->extra) ||
- __put_user(0, &fps->fpqd) ||
- clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
- ret = -EFAULT;
- }
+ ret = copy_regset_to_user(child, &ptrace32_view,
+ REGSET_FP, 0,
+ 68 * sizeof(u32),
+ fps);
break;
case PTRACE_SETFPREGS:
- ret = copy_regset_from_user(child, view, REGSET_FP,
- 0 * sizeof(u32),
- 32 * sizeof(u32),
- &fps->regs[0]);
- if (!ret)
- ret = copy_regset_from_user(child, view, REGSET_FP,
- 33 * sizeof(u32),
- 1 * sizeof(u32),
- &fps->fsr);
+ ret = copy_regset_from_user(child, &ptrace32_view,
+ REGSET_FP, 0,
+ 33 * sizeof(u32),
+ fps);
break;
case PTRACE_READTEXT:
@@ -988,31 +1027,17 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_GETREGS64:
- ret = copy_regset_to_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u64),
- 15 * sizeof(u64),
- &pregs->u_regs[0]);
- if (!ret) {
- /* XXX doesn't handle 'y' register correctly XXX */
- ret = copy_regset_to_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u64),
- 4 * sizeof(u64),
- &pregs->tstate);
- }
+ ret = copy_regset_to_user(child, &ptrace64_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u64),
+ pregs);
break;
case PTRACE_SETREGS64:
- ret = copy_regset_from_user(child, view, REGSET_GENERAL,
- 1 * sizeof(u64),
- 15 * sizeof(u64),
- &pregs->u_regs[0]);
- if (!ret) {
- /* XXX doesn't handle 'y' register correctly XXX */
- ret = copy_regset_from_user(child, view, REGSET_GENERAL,
- 32 * sizeof(u64),
- 4 * sizeof(u64),
- &pregs->tstate);
- }
+ ret = copy_regset_from_user(child, &ptrace64_view,
+ REGSET_GENERAL, 0,
+ 19 * sizeof(u64),
+ pregs);
break;
case PTRACE_GETFPREGS64:
@@ -1064,19 +1089,17 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
/* do the secure computing check first */
secure_computing_strict(regs->u_regs[UREG_G1]);
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
- ret = tracehook_report_syscall_entry(regs);
+ ret = ptrace_report_syscall_entry(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->u_regs[UREG_G1]);
- audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
- AUDIT_ARCH_SPARC :
- AUDIT_ARCH_SPARC64),
- regs->u_regs[UREG_G1],
- regs->u_regs[UREG_I0],
- regs->u_regs[UREG_I1],
- regs->u_regs[UREG_I2],
+ audit_syscall_entry(regs->u_regs[UREG_G1], regs->u_regs[UREG_I0],
+ regs->u_regs[UREG_I1], regs->u_regs[UREG_I2],
regs->u_regs[UREG_I3]);
return ret;
@@ -1084,11 +1107,70 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+ trace_sys_exit(regs, regs->u_regs[UREG_I0]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
+ ptrace_report_syscall_exit(regs, 0);
+
+ if (test_thread_flag(TIF_NOHZ))
+ user_enter();
+}
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel
+ * stack page(s), returning true if it is and false otherwise.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (ksp & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+ unsigned long *addr = (unsigned long *)ksp;
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
}
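
The helpers added above (regs_query_register_offset() and regs_get_kernel_stack_nth()) are the kind of building blocks kprobe-style argument fetching is built on. The sketch below shows one plausible consumer; the wrapper name is made up, and it assumes register names such as "i0" exist in regoffset_table, which is defined earlier in this file and not shown in this hunk.

#include <linux/ptrace.h>

/* Hypothetical helper, for illustration only: fetch a named register from
 * pt_regs, falling back to the Nth word of the kernel stack.
 */
static unsigned long fetch_arg(struct pt_regs *regs, const char *reg_name,
			       unsigned int stack_slot)
{
	int off = regs_query_register_offset(reg_name);	/* e.g. "i0" */

	if (off >= 0)
		return *(unsigned long *)((unsigned long)regs + off);

	/* regs_get_kernel_stack_nth() returns 0 for out-of-range slots. */
	return regs_get_kernel_stack_nth(regs, stack_slot);
}
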
diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c
index eba7d918162a..69c1b6c047d5 100644
--- a/arch/sparc/kernel/reboot.c
+++ b/arch/sparc/kernel/reboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* reboot.c: reboot/shutdown/halt/poweroff handling
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -6,6 +7,7 @@
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/pm.h>
+#include <linux/of.h>
#include <asm/oplib.h>
#include <asm/prom.h>
@@ -24,7 +26,7 @@ EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
- if (strcmp(of_console_device->type, "serial") || scons_pwroff)
+ if (!of_node_is_type(of_console_device, "serial") || scons_pwroff)
prom_halt_power_off();
prom_halt();
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 6c34de0c2abd..8931fe266346 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rtrap.S: Return from Sparc trap low-level code.
*
@@ -74,7 +75,7 @@ signal_p:
ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
mov %g2, %o2
- mov %l5, %o1
+ mov %l6, %o1
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index afa2a9e3d0a0..eef102765a7e 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rtrap.S: Preparing for return from trap on Sparc V9.
*
@@ -14,21 +15,42 @@
#include <asm/visasm.h>
#include <asm/processor.h>
-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+# define SCHEDULE_USER schedule_user
+#else
+# define SCHEDULE_USER schedule
+#endif
.text
.align 32
__handle_preemption:
- call schedule
- wrpr %g0, RTRAP_PSTATE, %pstate
+ call SCHEDULE_USER
+661: wrpr %g0, RTRAP_PSTATE, %pstate
+ /* If userspace is using ADI, it could potentially pass
+ * a pointer with version tag embedded in it. To maintain
+ * the ADI security, we must re-enable PSTATE.mcde before
+ * we continue execution in the kernel for another thread.
+ */
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+ .previous
ba,pt %xcc, __handle_preemption_continue
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_user_windows:
+ add %sp, PTREGS_OFF, %o0
call fault_in_user_windows
- wrpr %g0, RTRAP_PSTATE, %pstate
+661: wrpr %g0, RTRAP_PSTATE, %pstate
+ /* If userspace is using ADI, it could potentially pass
+ * a pointer with version tag embedded in it. To maintain
+ * the ADI security, we must re-enable PSTATE.mcde before
+ * we continue execution in the kernel for another thread.
+ */
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+ .previous
ba,pt %xcc, __handle_preemption_continue
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
@@ -45,7 +67,16 @@ __handle_signal:
add %sp, PTREGS_OFF, %o0
mov %l0, %o2
call do_notify_resume
- wrpr %g0, RTRAP_PSTATE, %pstate
+661: wrpr %g0, RTRAP_PSTATE, %pstate
+ /* If userspace is using ADI, it could potentially pass
+ * a pointer with version tag embedded in it. To maintain
+ * the ADI security, we must re-enable PSTATE.mcde before
+ * we continue execution in the kernel for another thread.
+ */
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+ .previous
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Signal delivery can modify pt_regs tstate, so we must
@@ -54,8 +85,9 @@ __handle_signal:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
+ andn %l1, %l4, %l1
ba,pt %xcc, __handle_preemption_continue
- andn %l1, %l4, %l1
+ srl %l4, 20, %l4
/* When returning from a NMI (%pil==15) interrupt we want to
* avoid running softirqs, doing IRQ tracing, preempting, etc.
@@ -67,7 +99,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
andn %l1, %l4, %l1
srl %l4, 20, %l4
ba,pt %xcc, rtrap_no_irq_enable
- wrpr %l4, %pil
+ nop
+ /* Do not actually set the %pil here. We will do that
+ * below after we clear PSTATE_IE in the %pstate register.
+ * If we re-enable interrupts here, we can recurse down
+ * the hardirq stack potentially endlessly, causing a
+ * stack overflow.
+ */
.align 64
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
@@ -216,10 +254,19 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
rdpr %otherwin, %l2
srl %l1, 3, %l1
- wrpr %l2, %g0, %canrestore
+661: wrpr %l2, %g0, %canrestore
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ .word 0x89880000 ! normalw
+ .previous
+
wrpr %l1, %g0, %wstate
brnz,pt %l2, user_rtt_restore
- wrpr %g0, %g0, %otherwin
+661: wrpr %g0, %g0, %otherwin
+ .section .fast_win_ctrl_1insn_patch, "ax"
+ .word 661b
+ nop
+ .previous
ldx [%g6 + TI_FLAGS], %g3
wr %g0, ASI_AIUP, %asi
@@ -229,53 +276,19 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
+ nop
-user_rtt_fill_fixup:
- rdpr %cwp, %g1
- add %g1, 1, %g1
- wrpr %g1, 0x0, %cwp
-
- rdpr %wstate, %g2
- sll %g2, 3, %g2
- wrpr %g2, 0x0, %wstate
-
- /* We know %canrestore and %otherwin are both zero. */
-
- sethi %hi(sparc64_kern_pri_context), %g2
- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
- mov PRIMARY_CONTEXT, %g1
-
-661: stxa %g2, [%g1] ASI_DMMU
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- stxa %g2, [%g1] ASI_MMU
- .previous
-
- sethi %hi(KERNBASE), %g1
- flush %g1
-
- or %g4, FAULT_CODE_WINFIXUP, %g4
- stb %g4, [%g6 + TI_FAULT_CODE]
- stx %g5, [%g6 + TI_FAULT_ADDR]
-
- mov %g6, %l1
- wrpr %g0, 0x0, %tl
-
-661: nop
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- SET_GL(0)
- .previous
+user_rtt_fill_fixup_dax:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 1, %g3
- wrpr %g0, RTRAP_PSTATE, %pstate
+user_rtt_fill_fixup_mna:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 2, %g3
- mov %l1, %g6
- ldx [%g6 + TI_TASK], %g4
- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
- call do_sparc64_fault
- add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+user_rtt_fill_fixup:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ clr %g3
user_rtt_pre_restore:
add %g1, 1, %g1
@@ -297,7 +310,7 @@ kern_rtt_restore:
retry
to_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
ldsw [%g6 + TI_PRE_COUNT], %l5
brnz %l5, kern_fpucheck
ldx [%g6 + TI_FLAGS], %l5
@@ -306,12 +319,10 @@ to_kernel:
nop
cmp %l4, 0
bne,pn %xcc, kern_fpucheck
- sethi %hi(PREEMPT_ACTIVE), %l6
- stw %l6, [%g6 + TI_PRE_COUNT]
- call schedule
+ nop
+ call preempt_schedule_irq
nop
ba,pt %xcc, rtrap
- stw %g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
brz,pt %l5, rt_continue
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index be5bdf93c767..0bababf6f2bc 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sbus.c: UltraSparc SBUS controller support.
*
@@ -13,7 +14,9 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/io.h>
@@ -66,8 +69,8 @@ void sbus_set_sbus64(struct device *dev, int bursts)
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (!regs) {
- printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n",
- op->dev.of_node->full_name);
+ printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n",
+ op->dev.of_node);
return;
}
slot = regs->which_io;
@@ -560,7 +563,7 @@ static void __init sbus_iommu_init(struct platform_device *op)
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
- op->dev.archdata.numa_node = -1;
+ op->dev.archdata.numa_node = NUMA_NO_NODE;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
new file mode 100644
index 000000000000..4975867d9001
--- /dev/null
+++ b/arch/sparc/kernel/setup.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/setup.h>
+#include <linux/sysctl.h>
+
+static const struct ctl_table sparc_sysctl_table[] = {
+ {
+ .procname = "reboot-cmd",
+ .data = reboot_command,
+ .maxlen = 256,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ {
+ .procname = "stop-a",
+ .data = &stop_a_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "scons-poweroff",
+ .data = &scons_pwroff,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_SPARC64
+ {
+ .procname = "tsb-ratio",
+ .data = &sysctl_tsb_ratio,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+};
+
+
+static int __init init_sparc_sysctls(void)
+{
+ register_sysctl_init("kernel", sparc_sysctl_table);
+ return 0;
+}
+
+arch_initcall(init_sparc_sysctls);
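
For context on the new file: register_sysctl_init("kernel", ...) should surface the four entries above as files under /proc/sys/kernel/ (reboot-cmd, stop-a, scons-poweroff, and tsb-ratio on 64-bit), readable by anyone and writable by root per the 0644 modes. A minimal userspace sketch reading one of them, with the path inferred from the procnames above:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/sys/kernel/stop-a", "r");	/* assumed path */

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("stop-a enabled: %s", buf);
	fclose(f);
	return 0;
}
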
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 1434526970a6..704375c061e7 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sparc/kernel/setup.c
*
@@ -16,7 +17,6 @@
#include <linux/initrd.h>
#include <asm/smp.h>
#include <linux/user.h>
-#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
@@ -33,12 +33,12 @@
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/start_kernel.h>
+#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/vaddrs.h>
#include <asm/mbus.h>
@@ -50,26 +50,12 @@
#include "kernel.h"
-struct screen_info screen_info = {
- 0, 0, /* orig-x, orig-y */
- 0, /* unused */
- 0, /* orig-video-page */
- 0, /* orig-video-mode */
- 128, /* orig-video-cols */
- 0,0,0, /* ega_ax, ega_bx, ega_cx */
- 54, /* orig-video-lines */
- 0, /* orig-video-isVGA */
- 16 /* orig-video-points */
-};
-
/* Typing sync at the prom prompt calls the function pointed to by
* romvec->pv_synchook which I set to the following function.
 * This should sync all filesystems and return; for now it just
* prints out pretty messages and returns.
*/
-extern unsigned long trapbase;
-
/* Pretty sick eh? */
static void prom_sync_me(void)
{
@@ -81,13 +67,13 @@ static void prom_sync_me(void)
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
"nop\n\t"
- "nop\n\t" : : "r" (&trapbase));
+ "nop\n\t" : : "r" (&trapbase[0]));
prom_printf("PROM SYNC COMMAND...\n");
- show_free_areas(0);
+ show_mem();
if (!is_idle_task(current)) {
local_irq_enable();
- sys_sync();
+ ksys_sync();
local_irq_disable();
}
prom_printf("Returning to prom\n");
@@ -109,7 +95,7 @@ unsigned long cmdline_memory_size __initdata = 0;
unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
{
prom_write(s, n);
}
@@ -150,7 +136,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
@@ -267,7 +253,6 @@ static __init void leon_patch(void)
}
struct tt_entry *sparc_ttable;
-struct pt_regs fake_swapper_regs;
/* Called from head_32.S - before we have setup anything
* in the kernel. Be very careful with what you do here.
@@ -300,43 +285,38 @@ void __init setup_arch(char **cmdline_p)
int i;
unsigned long highest_paddr;
- sparc_ttable = (struct tt_entry *) &trapbase;
+ sparc_ttable = &trapbase[0];
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
- strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
parse_early_param();
boot_flags_init(*cmdline_p);
register_console(&prom_early_console);
- printk("ARCH: ");
switch(sparc_cpu_model) {
case sun4m:
- printk("SUN4M\n");
+ pr_info("ARCH: SUN4M\n");
break;
case sun4d:
- printk("SUN4D\n");
+ pr_info("ARCH: SUN4D\n");
break;
case sun4e:
- printk("SUN4E\n");
+ pr_info("ARCH: SUN4E\n");
break;
case sun4u:
- printk("SUN4U\n");
+ pr_info("ARCH: SUN4U\n");
break;
case sparc_leon:
- printk("LEON\n");
+ pr_info("ARCH: LEON\n");
break;
default:
- printk("UNKNOWN!\n");
+ pr_info("ARCH: UNKNOWN!\n");
break;
}
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
idprom_init();
load_mmu();
@@ -359,20 +339,16 @@ void __init setup_arch(char **cmdline_p)
ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
- rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
prom_setsync(prom_sync_me);
- if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
+ if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
((*(short *)linux_dbvec) != -1)) {
printk("Booted under KADB. Syncing trap table.\n");
(*(linux_dbvec->teach_debugger))();
}
- init_task.thread.kregs = &fake_swapper_regs;
-
/* Run-time patch instructions to match the cpu model */
per_cpu_patch();
@@ -423,3 +399,10 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);
+
+#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
+void __init arch_cpu_finalize_init(void)
+{
+ cpu_data(0).udelay_val = loops_per_jiffy;
+}
+#endif
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 13785547e435..63615f5c99b4 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sparc64/kernel/setup.c
*
@@ -14,7 +15,6 @@
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
-#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
@@ -30,12 +30,14 @@
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
+#include <linux/start_kernel.h>
+#include <linux/memblock.h>
+#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
@@ -49,6 +51,8 @@
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
@@ -63,20 +67,8 @@
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);
-struct screen_info screen_info = {
- 0, 0, /* orig-x, orig-y */
- 0, /* unused */
- 0, /* orig-video-page */
- 0, /* orig-video-mode */
- 128, /* orig-video-cols */
- 0, 0, 0, /* unused, ega_bx, unused */
- 54, /* orig-video-lines */
- 0, /* orig-video-isVGA */
- 16 /* orig-video-points */
-};
-
static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
{
prom_write(s, n);
}
@@ -91,7 +83,7 @@ static struct console prom_early_console = {
.index = -1,
};
-/*
+/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
@@ -129,7 +121,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
@@ -141,21 +133,9 @@ static void __init boot_flags_init(char *commands)
process_switch(*commands++);
continue;
}
- if (!strncmp(commands, "mem=", 4)) {
- /*
- * "mem=XXX[kKmM]" overrides the PROM-reported
- * memory size.
- */
- cmdline_memory_size = simple_strtoul(commands + 4,
- &commands, 0);
- if (*commands == 'K' || *commands == 'k') {
- cmdline_memory_size <<= 10;
- commands++;
- } else if (*commands=='M' || *commands=='m') {
- cmdline_memory_size <<= 20;
- commands++;
- }
- }
+ if (!strncmp(commands, "mem=", 4))
+ cmdline_memory_size = memparse(commands + 4, &commands);
+
while (*commands && *commands != ' ')
commands++;
}
@@ -172,9 +152,7 @@ extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
-void __init per_cpu_patch(void)
+static void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
unsigned long ver;
@@ -266,7 +244,25 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
}
}
-void __init sun4v_patch(void)
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ struct sun4v_2insn_patch_entry *end)
+{
+ while (start < end) {
+ unsigned long addr = start->addr;
+
+ *(unsigned int *) (addr + 0) = start->insns[0];
+ wmb();
+ __asm__ __volatile__("flush %0" : : "r" (addr + 0));
+
+ *(unsigned int *) (addr + 4) = start->insns[1];
+ wmb();
+ __asm__ __volatile__("flush %0" : : "r" (addr + 4));
+
+ start++;
+ }
+}
+
+static void __init sun4v_patch(void)
{
extern void sun4v_hvapi_init(void);
@@ -279,6 +275,24 @@ void __init sun4v_patch(void)
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
+ case SUN4V_CHIP_SPARC_SN:
+ sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
+ &__sun_m7_1insn_patch_end);
+ sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
+ &__sun_m7_2insn_patch_end);
+ break;
+ default:
+ break;
+ }
+
+ if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
+ sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
+ &__fast_win_ctrl_1insn_patch_end);
+ }
+
sun4v_hvapi_init();
}
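
The chip-type switch above is what consumes the ".word 661b" records emitted by the .sun_m7_1insn_patch sections in rtrap_64.S earlier in this diff: on M7/M8/SN the recorded instructions are overwritten in place so PSTATE.mcde stays enabled across the return paths. Only the two-instruction applier is spelled out in this hunk; the sketch below is presumably what the one-instruction variant boils down to, with an entry layout (32-bit site address plus the replacement word) that is an assumption inferred from the code shown, and an illustrative struct name.

/* Kernel-context sketch, not a standalone program. */
struct one_insn_patch_entry {		/* illustrative name */
	unsigned int	addr;		/* address of the 661: instruction */
	unsigned int	insn;		/* word to write over it */
};

static void patch_1insn_range(struct one_insn_patch_entry *start,
			      struct one_insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) addr = start->insn;
		wmb();			/* order the store before the I-cache flush */
		__asm__ __volatile__("flush %0" : : "r" (addr));
		start++;
	}
}
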
@@ -335,14 +349,27 @@ static void __init pause_patch(void)
}
}
-#ifdef CONFIG_SMP
-void __init boot_cpu_id_too_large(int cpu)
+void __init start_early_boot(void)
{
- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
- cpu, NR_CPUS);
- prom_halt();
+ int cpu;
+
+ check_if_starfire();
+ per_cpu_patch();
+ sun4v_patch();
+ smp_init_cpu_poke();
+
+ cpu = hard_smp_processor_id();
+ if (cpu >= NR_CPUS) {
+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+ cpu, NR_CPUS);
+ prom_halt();
+ }
+ current_thread_info()->cpu = cpu;
+
+ time_init_early();
+ prom_init_report();
+ start_kernel();
}
-#endif
/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
@@ -359,7 +386,8 @@ static const char *hwcaps[] = {
*/
"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
- "ima", "cspare", "pause", "cbcond",
+ "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
+ "adp",
};
static const char *crypto_hwcaps[] = {
@@ -375,7 +403,7 @@ void cpucap_info(struct seq_file *m)
seq_puts(m, "cpucaps\t\t: ");
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
- if (caps & bit) {
+ if (hwcaps[i] && (caps & bit)) {
seq_printf(m, "%s%s",
printed ? "," : "", hwcaps[i]);
printed++;
@@ -429,7 +457,7 @@ static void __init report_hwcaps(unsigned long caps)
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
- if (caps & bit)
+ if (hwcaps[i] && (caps & bit))
report_one_hwcap(&printed, hwcaps[i]);
}
if (caps & HWCAP_SPARC_CRYPTO)
@@ -464,7 +492,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
- if (!strcmp(prop, hwcaps[i])) {
+ if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
caps |= bit;
break;
}
@@ -499,12 +527,22 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
@@ -530,13 +568,23 @@ static void __init init_sparc64_elf_hwcap(void)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
AV_SPARC_POPC);
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
}
@@ -551,11 +599,31 @@ static void __init init_sparc64_elf_hwcap(void)
pause_patch();
}
+static void __init alloc_irqstack_bootmem(void)
+{
+ unsigned int i, node;
+
+ for_each_possible_cpu(i) {
+ node = cpu_to_node(i);
+
+ softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
+ if (!softirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
+ hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
+ if (!hardirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
+ }
+}
+
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
- strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
parse_early_param();
boot_flags_init(*cmdline_p);
@@ -565,13 +633,9 @@ void __init setup_arch(char **cmdline_p)
register_console(&prom_early_console);
if (tlb_type == hypervisor)
- printk("ARCH: SUN4V\n");
+ pr_info("ARCH: SUN4V\n");
else
- printk("ARCH: SUN4U\n");
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
+ pr_info("ARCH: SUN4U\n");
idprom_init();
@@ -580,17 +644,13 @@ void __init setup_arch(char **cmdline_p)
ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
- rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
- task_thread_info(&init_task)->kregs = &fake_swapper_regs;
-
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
-
+
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
@@ -611,6 +671,12 @@ void __init setup_arch(char **cmdline_p)
paging_init();
init_sparc64_elf_hwcap();
+ /*
+ * Once the OF device tree and MDESC have been setup and nr_cpus has
+ * been parsed, we know the list of possible cpus. Therefore we can
+ * allocate the IRQ stacks.
+ */
+ alloc_irqstack_bootmem();
}
extern int stop_a_enabled;
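
One detail worth spelling out from the hwcaps changes above: hwcap bit i maps to hwcaps[i], so the NULL slot reserves the crypto bit without printing a name while "adp" claims the next bit, and the loops now skip NULL entries. The standalone demonstration below reuses the same array contents to show the mapping; the chosen caps value is arbitrary.

#include <stdio.h>

static const char *hwcaps_demo[] = {
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /* reserved for crypto */,
	"adp",
};

int main(void)
{
	unsigned long caps = (1UL << 5) | (1UL << 18) | (1UL << 19);
	unsigned int i;

	for (i = 0; i < sizeof(hwcaps_demo) / sizeof(hwcaps_demo[0]); i++)
		if (hwcaps_demo[i] && (caps & (1UL << i)))
			printf("%s\n", hwcaps_demo[i]);	/* prints vis and adp */
	return 0;
}
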
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index b524f91dd0e5..a23cdd7459bb 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -19,11 +20,9 @@
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/bitops.h>
-#include <linux/tracehook.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
@@ -31,6 +30,7 @@
#include <asm/switch_to.h>
#include "sigutil.h"
+#include "kernel.h"
/* This magic should be in g_upper[0] for all upper parts
* to be valid.
@@ -68,73 +68,16 @@ struct rt_signal_frame32 {
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user((short)from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (from->si_code >> 16) {
- case __SI_TIMER >> 16:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case __SI_CHLD >> 16:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- default:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case __SI_FAULT >> 16:
- err |= __put_user(from->si_trapno, &to->si_trapno);
- err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
- break;
- case __SI_POLL >> 16:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
- case __SI_MESGQ >> 16:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- }
- }
- return err;
-}
-
-/* CAUTION: This is just a very minimalist implementation for the
- * sake of compat_sys_rt_sigqueueinfo()
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
*/
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+static bool invalid_frame_pointer(void __user *fp, int fplen)
{
- if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad, from->_sifields._pad,
- SI_PAD_SIZE))
- return -EFAULT;
-
- return 0;
+ if ((((unsigned long) fp) & 15) ||
+ ((unsigned long)fp) > 0x100000000ULL - fplen)
+ return true;
+ return false;
}
void do_sigreturn32(struct pt_regs *regs)
@@ -142,14 +85,14 @@ void do_sigreturn32(struct pt_regs *regs)
struct signal_frame32 __user *sf;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
- unsigned int psr;
- unsigned pc, npc;
+ unsigned int psr, ufp;
+ unsigned int pc, npc;
sigset_t set;
- unsigned seta[_COMPAT_NSIG_WORDS];
+ compat_sigset_t seta;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
@@ -157,11 +100,16 @@ void do_sigreturn32(struct pt_regs *regs)
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
- if (get_user(pc, &sf->info.si_regs.pc) ||
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
+ goto segv;
+
+ if (__get_user(pc, &sf->info.si_regs.pc) ||
__get_user(npc, &sf->info.si_regs.npc))
goto segv;
@@ -209,47 +157,47 @@ void do_sigreturn32(struct pt_regs *regs)
if (restore_rwin_state(compat_ptr(rwin_save)))
goto segv;
}
- err |= __get_user(seta[0], &sf->info.si_mask);
- err |= copy_from_user(seta+1, &sf->extramask,
+ err |= __get_user(seta.sig[0], &sf->info.si_mask);
+ err |= copy_from_user(&seta.sig[1], &sf->extramask,
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (err)
goto segv;
- switch (_NSIG_WORDS) {
- case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
- case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
- case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
- case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
- }
+
+ set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
set_current_blocked(&set);
return;
segv:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
}
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
struct rt_signal_frame32 __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
sigset_t set;
- compat_sigset_t seta;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
- if (get_user(pc, &sf->regs.pc) ||
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
+ goto segv;
+
+ if (__get_user(pc, &sf->regs.pc) ||
__get_user(npc, &sf->regs.npc))
goto segv;
@@ -292,7 +240,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
err |= __get_user(fpu_save, &sf->fpu_save);
if (!err && fpu_save)
err |= restore_fpu_state(regs, compat_ptr(fpu_save));
- err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
+ err |= get_compat_sigset(&set, &sf->mask);
err |= compat_restore_altstack(&sf->stack);
if (err)
goto segv;
@@ -303,24 +251,10 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
goto segv;
}
- switch (_NSIG_WORDS) {
- case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
- case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
- case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
- case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
- }
set_current_blocked(&set);
return;
segv:
- force_sig(SIGSEGV, current);
-}
-
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
- return 1;
- return 0;
+ force_sig(SIGSEGV);
}
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
@@ -363,6 +297,7 @@ static void flush_signal_insns(unsigned long address)
unsigned long pstate, paddr;
pte_t *ptep, pte;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
@@ -382,7 +317,10 @@ static void flush_signal_insns(unsigned long address)
pgdp = pgd_offset(current->mm, address);
if (pgd_none(*pgdp))
goto out_irqs_on;
- pudp = pud_offset(pgdp, address);
+ p4dp = p4d_offset(pgdp, address);
+ if (p4d_none(*p4dp))
+ goto out_irqs_on;
+ pudp = pud_offset(p4dp, address);
if (pud_none(*pudp))
goto out_irqs_on;
pmdp = pmd_offset(pudp, address);
@@ -390,6 +328,8 @@ static void flush_signal_insns(unsigned long address)
goto out_irqs_on;
ptep = pte_offset_map(pmdp, address);
+ if (!ptep)
+ goto out_irqs_on;
pte = *ptep;
if (!pte_present(pte))
goto out_unmap;
@@ -417,7 +357,7 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
void __user *tail;
int sigframe_size;
u32 psr;
- unsigned int seta[_COMPAT_NSIG_WORDS];
+ compat_sigset_t seta;
/* 1. Make sure everything is clean */
synchronize_user_stack();
@@ -435,7 +375,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig);
return -EINVAL;
}
@@ -481,24 +425,20 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
err |= __put_user(0, &sf->rwin_save);
}
- switch (_NSIG_WORDS) {
- case 4: seta[7] = (oldset->sig[3] >> 32);
- seta[6] = oldset->sig[3];
- case 3: seta[5] = (oldset->sig[2] >> 32);
- seta[4] = oldset->sig[2];
- case 2: seta[3] = (oldset->sig[1] >> 32);
- seta[2] = oldset->sig[1];
- case 1: seta[1] = (oldset->sig[0] >> 32);
- seta[0] = oldset->sig[0];
- }
- err |= __put_user(seta[0], &sf->info.si_mask);
- err |= __copy_to_user(sf->extramask, seta + 1,
+ /* If these change we need to know - assignments to seta rely on these sizes */
+ BUILD_BUG_ON(_NSIG_WORDS != 1);
+ BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
+ seta.sig[1] = (oldset->sig[0] >> 32);
+ seta.sig[0] = oldset->sig[0];
+
+ err |= __put_user(seta.sig[0], &sf->info.si_mask);
+ err |= __copy_to_user(sf->extramask, &seta.sig[1],
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (!wsaved) {
- err |= copy_in_user((u32 __user *)sf,
- (u32 __user *)(regs->u_regs[UREG_FP]),
- sizeof(struct reg_window32));
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
} else {
struct reg_window *rp;
@@ -552,7 +492,6 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
void __user *tail;
int sigframe_size;
u32 psr;
- compat_sigset_t seta;
/* 1. Make sure everything is clean */
synchronize_user_stack();
@@ -570,7 +509,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig);
return -EINVAL;
}
@@ -622,22 +565,12 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
/* Setup sigaltstack */
err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
- switch (_NSIG_WORDS) {
- case 4: seta.sig[7] = (oldset->sig[3] >> 32);
- seta.sig[6] = oldset->sig[3];
- case 3: seta.sig[5] = (oldset->sig[2] >> 32);
- seta.sig[4] = oldset->sig[2];
- case 2: seta.sig[3] = (oldset->sig[1] >> 32);
- seta.sig[2] = oldset->sig[1];
- case 1: seta.sig[1] = (oldset->sig[0] >> 32);
- seta.sig[0] = oldset->sig[0];
- }
- err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
+ err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t));
if (!wsaved) {
- err |= copy_in_user((u32 __user *)sf,
- (u32 __user *)(regs->u_regs[UREG_FP]),
- sizeof(struct reg_window32));
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
} else {
struct reg_window *rp;
@@ -714,7 +647,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -754,6 +687,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
@@ -812,3 +746,41 @@ asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
out:
return ret;
}
+
+/*
+ * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
+ * changes likely come with new fields that should be added below.
+ */
+static_assert(NSIGILL == 11);
+static_assert(NSIGFPE == 15);
+static_assert(NSIGSEGV == 10);
+static_assert(NSIGBUS == 5);
+static_assert(NSIGTRAP == 6);
+static_assert(NSIGCHLD == 6);
+static_assert(NSIGSYS == 2);
+static_assert(sizeof(compat_siginfo_t) == 128);
+static_assert(__alignof__(compat_siginfo_t) == 4);
+static_assert(offsetof(compat_siginfo_t, si_signo) == 0x00);
+static_assert(offsetof(compat_siginfo_t, si_errno) == 0x04);
+static_assert(offsetof(compat_siginfo_t, si_code) == 0x08);
+static_assert(offsetof(compat_siginfo_t, si_pid) == 0x0c);
+static_assert(offsetof(compat_siginfo_t, si_uid) == 0x10);
+static_assert(offsetof(compat_siginfo_t, si_tid) == 0x0c);
+static_assert(offsetof(compat_siginfo_t, si_overrun) == 0x10);
+static_assert(offsetof(compat_siginfo_t, si_status) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_utime) == 0x18);
+static_assert(offsetof(compat_siginfo_t, si_stime) == 0x1c);
+static_assert(offsetof(compat_siginfo_t, si_value) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_int) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_ptr) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_addr) == 0x0c);
+static_assert(offsetof(compat_siginfo_t, si_trapno) == 0x10);
+static_assert(offsetof(compat_siginfo_t, si_addr_lsb) == 0x10);
+static_assert(offsetof(compat_siginfo_t, si_lower) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18);
+static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10);
+static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14);
+static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18);
+static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c);
+static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10);
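
Stepping back from the asserts: the BUILD_BUG_ON()s added in setup_frame32() pin down why the open-coded sigset conversion in this file is safe, since the 64-bit set is exactly one word and the compat set exactly two. The mask is split into low and high 32-bit halves on the way out and reassembled with a shift-and-add on the way back, as in do_sigreturn32() above. A tiny standalone check of that round trip:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* SIGINT (2), SIGPIPE (13) and a signal above 32, as sigmask bits */
	uint64_t mask = (1ULL << 1) | (1ULL << 12) | (1ULL << 40);
	uint32_t lo = (uint32_t)mask;			/* seta.sig[0] */
	uint32_t hi = (uint32_t)(mask >> 32);		/* seta.sig[1] */
	uint64_t back = (uint64_t)lo + ((uint64_t)hi << 32);

	printf("round trip ok: %d\n", mask == back);	/* prints 1 */
	return 0;
}
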
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7d5d8e1f8415..478014d2e59b 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -18,16 +19,15 @@
#include <linux/smp.h>
#include <linux/binfmts.h> /* do_coredump */
#include <linux/bitops.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h> /* flush_sig_insns */
#include <asm/switch_to.h>
#include "sigutil.h"
+#include "kernel.h"
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth);
@@ -59,27 +59,42 @@ struct rt_signal_frame {
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen))
+ return true;
+
+ return false;
+}
+
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
+ unsigned long up_psr, pc, npc, ufp;
struct signal_frame __user *sf;
- unsigned long up_psr, pc, npc;
sigset_t set;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;
- if (((unsigned long) sf) & 3)
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv_and_exit;
+
+ if (ufp & 0x7)
goto segv_and_exit;
err = __get_user(pc, &sf->info.si_regs.pc);
@@ -120,13 +135,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
return;
segv_and_exit:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
}
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
@@ -134,8 +149,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 0x03))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
goto segv;
err = __get_user(pc, &sf->regs.pc);
@@ -174,16 +194,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
set_current_blocked(&set);
return;
segv:
- force_sig(SIGSEGV, current);
-}
-
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
- return 1;
-
- return 0;
+ force_sig(SIGSEGV);
}
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
@@ -233,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ force_exit_sig(SIGILL);
return -EINVAL;
}
@@ -325,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
sf = (struct rt_signal_frame __user *)
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ force_exit_sig(SIGILL);
return -EINVAL;
}
@@ -341,7 +352,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
err |= __put_user(0, &sf->extra_size);
if (psr & PSR_EF) {
- __siginfo_fpu_t *fp = tail;
+ __siginfo_fpu_t __user *fp = tail;
tail += sizeof(*fp);
err |= save_fpu_state(regs, fp);
err |= __put_user(fp, &sf->fpu_save);
@@ -349,7 +360,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
err |= __put_user(0, &sf->fpu_save);
}
if (wsaved) {
- __siginfo_rwin_t *rwp = tail;
+ __siginfo_rwin_t __user *rwp = tail;
tail += sizeof(*rwp);
err |= save_rwin_state(wsaved, rwp);
err |= __put_user(rwp, &sf->rwin_save);
@@ -389,8 +400,8 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
else {
regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
- /* mov __NR_sigreturn, %g1 */
- err |= __put_user(0x821020d8, &sf->insns[0]);
+ /* mov __NR_rt_sigreturn, %g1 */
+ err |= __put_user(0x82102065, &sf->insns[0]);
/* t 0x10 */
err |= __put_user(0x91d02010, &sf->insns[1]);
@@ -429,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
@@ -462,7 +473,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*
* %g7 is used as the "thread register". %g6 is not used in
* any fixed manner. %g6 is used as a scratch register and
- * a compiler temporary, but it's value is never used across
+ * a compiler temporary, but its value is never used across
* a system call. Therefore %g6 is usable for orig_i0 storage.
*/
if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
@@ -495,6 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
@@ -509,17 +521,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
unsigned long thread_info_flags)
{
- if (thread_info_flags & _TIF_SIGPENDING)
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs, orig_i0);
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
}
-asmlinkage int
-do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
- unsigned long sp)
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+ struct sigstack __user *ossptr,
+ unsigned long sp)
{
int ret = -EFAULT;
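
On the trampoline fix earlier in this file (0x821020d8 becoming 0x82102065): on SPARC, "mov imm, %g1" assembles as "or %g0, imm, %g1", whose opcode base is 0x82102000 with a 13-bit immediate in the low bits, so the old word encoded __NR_sigreturn (216, 0xd8) where __NR_rt_sigreturn (101, 0x65) was needed, exactly as the updated comment says. A small standalone check; the macro name is made up:

#include <stdio.h>
#include <stdint.h>

/* "mov imm, %g1" == "or %g0, imm, %g1": base opcode 0x82102000 plus a
 * 13-bit immediate (illustrative macro, not a kernel definition).
 */
#define SPARC_MOV_G1(imm)	(0x82102000u | ((uint32_t)(imm) & 0x1fff))

int main(void)
{
	printf("0x%08x\n", SPARC_MOV_G1(101));	/* 0x82102065, __NR_rt_sigreturn */
	printf("0x%08x\n", SPARC_MOV_G1(216));	/* 0x821020d8, __NR_sigreturn */
	return 0;
}
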
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 35923e8abd82..2d64566a1f88 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* arch/sparc64/kernel/signal.c
*
@@ -8,25 +9,22 @@
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h> /* for compat_old_sigset_t */
-#endif
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
+#include <linux/context_tracking.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
@@ -34,22 +32,24 @@
#include <asm/switch_to.h>
#include <asm/cacheflush.h>
-#include "entry.h"
-#include "systbls.h"
#include "sigutil.h"
+#include "systbls.h"
+#include "kernel.h"
+#include "entry.h"
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
+ enum ctx_state prev_state = exception_enter();
mc_gregset_t __user *grp;
unsigned long pc, npc, tstate;
unsigned long fp, i7;
unsigned char fenab;
int err;
- flush_user_windows();
+ synchronize_user_stack();
if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp))))
@@ -129,16 +129,19 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
}
if (err)
goto do_sigsegv;
-
+out:
+ exception_exit(prev_state);
return;
do_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
+ goto out;
}
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
+ enum ctx_state prev_state = exception_enter();
mc_gregset_t __user *grp;
mcontext_t __user *mcp;
unsigned long fp, i7;
@@ -220,10 +223,23 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs)
}
if (err)
goto do_sigsegv;
-
+out:
+ exception_exit(prev_state);
return;
do_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+/* Checks if the fp is valid. We always build rt signal frames which
+ * are 16-byte aligned, so we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+ if (((unsigned long) fp) & 15)
+ return true;
+ return false;
}
struct rt_signal_frame {
@@ -238,25 +254,31 @@ struct rt_signal_frame {
void do_rt_sigreturn(struct pt_regs *regs)
{
+ unsigned long tpc, tnpc, tstate, ufp;
struct rt_signal_frame __user *sf;
- unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack ();
sf = (struct rt_signal_frame __user *)
(regs->u_regs [UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
- if (((unsigned long) sf) & 3)
+ if (invalid_frame_pointer(sf))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;
- err = get_user(tpc, &sf->regs.tpc);
+ if ((ufp + STACK_BIAS) & 0x7)
+ goto segv;
+
+ err = __get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;
@@ -297,15 +319,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
set_current_blocked(&set);
return;
segv:
- force_sig(SIGSEGV, current);
-}
-
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
- if (((unsigned long) fp) & 15)
- return 1;
- return 0;
+ force_sig(SIGSEGV);
}
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
@@ -355,7 +369,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
get_sigframe(ksig, regs, sf_size);
if (invalid_frame_pointer (sf)) {
- do_exit(SIGILL); /* won't return, actually */
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig);
return -EINVAL;
}
@@ -388,10 +406,10 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t));
if (!wsaved) {
- err |= copy_in_user((u64 __user *)sf,
- (u64 __user *)(regs->u_regs[UREG_FP] +
- STACK_BIAS),
- sizeof(struct reg_window));
+ err |= raw_copy_in_user((u64 __user *)sf,
+ (u64 __user *)(regs->u_regs[UREG_FP] +
+ STACK_BIAS),
+ sizeof(struct reg_window));
} else {
struct reg_window *rp;
@@ -443,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -476,7 +494,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*
* %g7 is used as the "thread register". %g6 is not used in
* any fixed manner. %g6 is used as a scratch register and
- * a compiler temporary, but it's value is never used across
+ * a compiler temporary, but its value is never used across
* a system call. Therefore %g6 is usable for orig_i0 storage.
*/
if (pt_regs_is_syscall(regs) &&
@@ -485,7 +503,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
- extern void do_signal32(struct pt_regs *);
do_signal32(regs);
return;
}
@@ -515,6 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
@@ -528,11 +546,50 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
- if (thread_info_flags & _TIF_SIGPENDING)
+ user_exit();
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs, orig_i0);
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+ user_enter();
}
+/*
+ * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
+ * changes likely come with new fields that should be added below.
+ */
+static_assert(NSIGILL == 11);
+static_assert(NSIGFPE == 15);
+static_assert(NSIGSEGV == 10);
+static_assert(NSIGBUS == 5);
+static_assert(NSIGTRAP == 6);
+static_assert(NSIGCHLD == 6);
+static_assert(NSIGSYS == 2);
+static_assert(sizeof(siginfo_t) == 128);
+static_assert(__alignof__(siginfo_t) == 8);
+static_assert(offsetof(siginfo_t, si_signo) == 0x00);
+static_assert(offsetof(siginfo_t, si_errno) == 0x04);
+static_assert(offsetof(siginfo_t, si_code) == 0x08);
+static_assert(offsetof(siginfo_t, si_pid) == 0x10);
+static_assert(offsetof(siginfo_t, si_uid) == 0x14);
+static_assert(offsetof(siginfo_t, si_tid) == 0x10);
+static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
+static_assert(offsetof(siginfo_t, si_status) == 0x18);
+static_assert(offsetof(siginfo_t, si_utime) == 0x20);
+static_assert(offsetof(siginfo_t, si_stime) == 0x28);
+static_assert(offsetof(siginfo_t, si_value) == 0x18);
+static_assert(offsetof(siginfo_t, si_int) == 0x18);
+static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
+static_assert(offsetof(siginfo_t, si_addr) == 0x10);
+static_assert(offsetof(siginfo_t, si_trapno) == 0x18);
+static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
+static_assert(offsetof(siginfo_t, si_lower) == 0x20);
+static_assert(offsetof(siginfo_t, si_upper) == 0x28);
+static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
+static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
+static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
+static_assert(offsetof(siginfo_t, si_band) == 0x10);
+static_assert(offsetof(siginfo_t, si_fd) == 0x14);
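The 64-bit sigreturn hunks above reject a signal frame that is not 16-byte aligned and a saved user %fp whose biased address is not 8-byte aligned. The following is a standalone sketch of those two checks, assuming the sparc64 STACK_BIAS of 2047; the helper names mirror the patch, but the program itself is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_BIAS 2047UL	/* sparc64 stack bias */

/* rt signal frames are always built 16-byte aligned. */
static bool invalid_frame_pointer(uintptr_t fp)
{
	return (fp & 15) != 0;
}

/* The saved user %fp must be 8-byte aligned once the bias is applied. */
static bool invalid_saved_fp(uintptr_t ufp)
{
	return ((ufp + STACK_BIAS) & 7) != 0;
}

int main(void)
{
	uintptr_t sf = 0x7feffff0UL;			/* 16-byte aligned frame */
	uintptr_t ufp = 0x7fefff00UL - STACK_BIAS;	/* biased value is aligned */

	printf("frame ok: %d, saved fp ok: %d\n",
	       !invalid_frame_pointer(sf), !invalid_saved_fp(ufp));
	return 0;
}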
diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h
index d223aa432bb6..21d332d8bddd 100644
--- a/arch/sparc/kernel/sigutil.h
+++ b/arch/sparc/kernel/sigutil.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SIGUTIL_H
#define _SIGUTIL_H
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
index 0f6eebe71e6c..f25c6daa9f52 100644
--- a/arch/sparc/kernel/sigutil_32.c
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
@@ -48,6 +49,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;
+
+ if (((unsigned long) fpu) & 3)
+ return -EFAULT;
+
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;
@@ -60,7 +65,7 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
set_used_math();
clear_tsk_thread_flag(current, TIF_USEDFPU);
- if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
+ if (!access_ok(fpu, sizeof(*fpu)))
return -EFAULT;
err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
@@ -97,7 +102,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 3)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index 387834a9c56a..512e4639e4a1 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
@@ -37,7 +38,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
unsigned long fprs;
int err;
- err = __get_user(fprs, &fpu->si_fprs);
+ if (((unsigned long) fpu) & 7)
+ return -EFAULT;
+
+ err = get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
@@ -72,7 +76,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 7)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e3f2b81c23f1..87eaa7719fa2 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -20,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
+#include <linux/profile.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
@@ -27,8 +29,6 @@
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -39,7 +39,7 @@
#include "kernel.h"
#include "irq.h"
-volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
@@ -53,7 +53,7 @@ const struct sparc32_ipi_ops *sparc32_ipi_ops;
* instruction which is much better...
*/
-void __cpuinit smp_store_cpu_info(int id)
+void smp_store_cpu_info(int id)
{
int cpu_node;
int mid;
@@ -67,7 +67,7 @@ void __cpuinit smp_store_cpu_info(int id)
mid = cpu_get_hwmid(cpu_node);
if (mid < 0) {
- printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+ printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x", id, cpu_node);
mid = 0;
}
cpu_data(id).mid = mid;
@@ -75,8 +75,6 @@ void __cpuinit smp_store_cpu_info(int id)
void __init smp_cpus_done(unsigned int max_cpus)
{
- extern void smp4m_smp_done(void);
- extern void smp4d_smp_done(void);
unsigned long bogosum = 0;
int cpu, num = 0;
@@ -120,9 +118,9 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
-struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
+struct linux_prom_registers smp_penguin_ctable = { 0 };
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
{
/*
* CPU model dependent way of implementing IPI generation targeting
@@ -176,15 +174,8 @@ void smp_call_function_interrupt(void)
irq_exit();
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- extern void __init smp4m_boot_cpus(void);
- extern void __init smp4d_boot_cpus(void);
int i, cpuid, extra;
printk("Entering SMP Mode...\n");
@@ -259,10 +250,8 @@ void __init smp_prepare_boot_cpu(void)
set_cpu_possible(cpuid, true);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *);
- extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *);
int ret=0;
switch(sparc_cpu_model) {
@@ -297,7 +286,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
return ret;
}
-void __cpuinit arch_cpu_pre_starting(void *arg)
+static void arch_cpu_pre_starting(void *arg)
{
local_ops->cache_all();
local_ops->tlb_all();
@@ -317,7 +306,7 @@ void __cpuinit arch_cpu_pre_starting(void *arg)
}
}
-void __cpuinit arch_cpu_pre_online(void *arg)
+static void arch_cpu_pre_online(void *arg)
{
unsigned int cpuid = hard_smp_processor_id();
@@ -344,7 +333,7 @@ void __cpuinit arch_cpu_pre_online(void *arg)
}
}
-void __cpuinit sparc_start_secondary(void *arg)
+static void sparc_start_secondary(void *arg)
{
unsigned int cpu;
@@ -354,12 +343,9 @@ void __cpuinit sparc_start_secondary(void *arg)
*/
arch_cpu_pre_starting(arg);
- preempt_disable();
cpu = smp_processor_id();
- /* Invoke the CPU_STARTING notifier callbacks */
notify_cpu_starting(cpu);
-
arch_cpu_pre_online(arg);
/* Set the CPU in the cpu_online_mask */
@@ -369,13 +355,13 @@ void __cpuinit sparc_start_secondary(void *arg)
local_irq_enable();
wmb();
- cpu_startup_entry(CPUHP_ONLINE);
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
/* We should never reach here! */
BUG();
}
-void __cpuinit smp_callin(void)
+void smp_callin(void)
{
sparc_start_secondary(NULL);
}
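The smp_32.c bring-up path above has a secondary cpu spin until the boot cpu adds it to smp_commenced_mask before it finishes coming online. Here is a rough userspace analogue of that release-and-spin handshake, using C11 threads and atomics purely for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_uint commenced_mask;

/* Secondary cpu: spin until the boot cpu publishes our bit, then go online. */
static int secondary(void *arg)
{
	unsigned int cpu = *(unsigned int *)arg;

	while (!(atomic_load(&commenced_mask) & (1u << cpu)))
		thrd_yield();
	printf("cpu %u online\n", cpu);
	return 0;
}

int main(void)
{
	unsigned int cpu = 1;
	thrd_t t;

	thrd_create(&t, secondary, &cpu);
	atomic_fetch_or(&commenced_mask, 1u << cpu);	/* boot cpu releases it */
	thrd_join(t, NULL);
	return 0;
}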
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539eda928c..5cbd6ed5ef6f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
*
* Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -5,7 +6,8 @@
#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
@@ -20,11 +22,12 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
+#include <linux/kgdb.h>
#include <asm/head.h>
#include <asm/ptrace.h>
@@ -35,15 +38,16 @@
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
+#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
+#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
@@ -52,18 +56,28 @@
#include <asm/pcr.h>
#include "cpumap.h"
-
-int sparc64_multi_core __read_mostly;
+#include "kernel.h"
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
+cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
+
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
+EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
+static DEFINE_PER_CPU(bool, poke);
+static bool cpu_poke;
+
void smp_info(struct seq_file *m)
{
int i;
@@ -87,7 +101,7 @@ extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
-void __cpuinit smp_callin(void)
+void smp_callin(void)
{
int cpuid = hard_smp_processor_id();
@@ -113,7 +127,7 @@ void __cpuinit smp_callin(void)
current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
@@ -123,12 +137,10 @@ void __cpuinit smp_callin(void)
rmb();
set_cpu_online(cpuid, true);
- local_irq_enable();
- /* idle thread is expected to have preempt disabled */
- preempt_disable();
+ local_irq_enable();
- cpu_startup_entry(CPUHP_ONLINE);
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void cpu_panic(void)
@@ -150,7 +162,7 @@ void cpu_panic(void)
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
@@ -258,7 +270,7 @@ static void smp_synchronize_one_tick(int cpu)
go[MASTER] = 0;
membar_safe("#StoreLoad");
- spin_lock_irqsave(&itc_sync_lock, flags);
+ raw_spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
@@ -269,19 +281,12 @@ static void smp_synchronize_one_tick(int cpu)
membar_safe("#StoreLoad");
}
}
- spin_unlock_irqrestore(&itc_sync_lock, flags);
+ raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
-/* XXX Put this in some common place. XXX */
-static unsigned long kimage_addr_to_ra(void *p)
-{
- unsigned long val = (unsigned long) p;
-
- return kern_base + (val - KERNBASE);
-}
-
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
+static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
+ void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
@@ -292,9 +297,7 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
unsigned long hv_err;
int i;
- hdesc = kzalloc(sizeof(*hdesc) +
- (sizeof(struct hvtramp_mapping) *
- num_kernel_image_mappings - 1),
+ hdesc = kzalloc(struct_size(hdesc, maps, num_kernel_image_mappings),
GFP_KERNEL);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
@@ -342,7 +345,7 @@ extern unsigned long sparc64_cpu_startup;
*/
static struct thread_info *cpu_new_thread = NULL;
-static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
+static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
@@ -618,22 +621,48 @@ retry:
}
}
-/* Multi-cpu list version. */
+#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
+#define MONDO_USEC_WAIT_MIN 2
+#define MONDO_USEC_WAIT_MAX 100
+#define MONDO_RETRY_LIMIT 500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, so we re-send it until
+ * every cpu has received it, or until the remaining cpus are truly
+ * stuck and unable to receive it, at which point we time out.
+ * Occasionally a target cpu strand is borrowed briefly by the
+ * hypervisor to perform a guest service, such as PCIe error handling.
+ * Given that service time, an overall wait of 1 second is reasonable
+ * for a single cpu. Two in-between mondo check wait times are defined:
+ * 2 usec for a single-cpu quick turnaround and up to 100 usec for
+ * large cpu counts. Delivering mondos to many cpus can take longer,
+ * so we adjust the retry count as long as targets make forward progress.
+ */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
- int retries, this_cpu, prev_sent, i, saw_cpu_error;
+ int this_cpu, tot_cpus, prev_sent, i, rem;
+ int usec_wait, retries, tot_retries;
+ u16 first_cpu = 0xffff;
+ unsigned long xc_rcvd = 0;
unsigned long status;
+ int ecpuerror_id = 0;
+ int enocpu_id = 0;
u16 *cpu_list;
+ u16 cpu;
this_cpu = smp_processor_id();
-
cpu_list = __va(tb->cpu_list_pa);
-
- saw_cpu_error = 0;
- retries = 0;
+ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+ if (usec_wait > MONDO_USEC_WAIT_MAX)
+ usec_wait = MONDO_USEC_WAIT_MAX;
+ retries = tot_retries = 0;
+ tot_cpus = cnt;
prev_sent = 0;
+
do {
- int forward_progress, n_sent;
+ int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
@@ -641,94 +670,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
- break;
+ goto xcall_done;
+
+ /* If not these non-fatal errors, panic */
+ if (unlikely((status != HV_EWOULDBLOCK) &&
+ (status != HV_ECPUERROR) &&
+ (status != HV_ENOCPU)))
+ goto fatal_errors;
/* First, see if we made any forward progress.
*
+ * Go through the cpu_list, count the target cpus that have
+ * received our mondo (n_sent), and those that did not (rem).
+ * Re-pack cpu_list with the cpus that remain to be retried at the
+ * front - this simplifies tracking the truly stalled cpus.
+ *
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
+ *
+ * EWOULDBLOCK means some target cpus did not receive the
+ * mondo and retry usually helps.
+ *
+ * ECPUERROR means at least one target cpu is in error state,
+ * it's usually safe to skip the faulty cpu and retry.
+ *
+ * ENOCPU means one of the target cpus doesn't belong to the
+ * domain, perhaps because it was offlined, which is unexpected
+ * but not fatal, and it's okay to skip the offlined cpu.
*/
+ rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
- if (likely(cpu_list[i] == 0xffff))
+ cpu = cpu_list[i];
+ if (likely(cpu == 0xffff)) {
n_sent++;
+ } else if ((status == HV_ECPUERROR) &&
+ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+ ecpuerror_id = cpu + 1;
+ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+ enocpu_id = cpu + 1;
+ } else {
+ cpu_list[rem++] = cpu;
+ }
}
- forward_progress = 0;
- if (n_sent > prev_sent)
- forward_progress = 1;
+ /* No cpu remained, we're done. */
+ if (rem == 0)
+ break;
- prev_sent = n_sent;
+ /* Otherwise, update the cpu count for retry. */
+ cnt = rem;
- /* If we get a HV_ECPUERROR, then one or more of the cpus
- * in the list are in error state. Use the cpu_state()
- * hypervisor call to find out which cpus are in error state.
+ /* Record the overall number of mondos received by the
+ * first of the remaining cpus.
*/
- if (unlikely(status == HV_ECPUERROR)) {
- for (i = 0; i < cnt; i++) {
- long err;
- u16 cpu;
+ if (first_cpu != cpu_list[0]) {
+ first_cpu = cpu_list[0];
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ }
- cpu = cpu_list[i];
- if (cpu == 0xffff)
- continue;
+ /* Was any mondo delivered successfully? */
+ mondo_delivered = (n_sent > prev_sent);
+ prev_sent = n_sent;
- err = sun4v_cpu_state(cpu);
- if (err == HV_CPU_STATE_ERROR) {
- saw_cpu_error = (cpu + 1);
- cpu_list[i] = 0xffff;
- }
- }
- } else if (unlikely(status != HV_EWOULDBLOCK))
- goto fatal_mondo_error;
+ /* or, was any target cpu busy processing other mondos? */
+ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
- /* Don't bother rewriting the CPU list, just leave the
- * 0xffff and non-0xffff entries in there and the
- * hypervisor will do the right thing.
- *
- * Only advance timeout state if we didn't make any
- * forward progress.
+ /* The retry count only tracks iterations with no progress. If
+ * we're making progress, reset the retry count.
*/
- if (unlikely(!forward_progress)) {
- if (unlikely(++retries > 10000))
- goto fatal_mondo_timeout;
-
- /* Delay a little bit to let other cpus catch up
- * on their cpu mondo queue work.
- */
- udelay(2 * cnt);
+ if (likely(mondo_delivered || target_cpu_busy)) {
+ tot_retries += retries;
+ retries = 0;
+ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+ goto fatal_mondo_timeout;
}
- } while (1);
- if (unlikely(saw_cpu_error))
- goto fatal_mondo_cpu_error;
+ /* Delay a little bit to let other cpus catch up on
+ * their cpu mondo queue work.
+ */
+ if (!mondo_delivered)
+ udelay(usec_wait);
- return;
+ retries++;
+ } while (1);
-fatal_mondo_cpu_error:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
- "(including %d) were in error state\n",
- this_cpu, saw_cpu_error - 1);
+xcall_done:
+ if (unlikely(ecpuerror_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+ this_cpu, ecpuerror_id - 1);
+ } else if (unlikely(enocpu_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+ this_cpu, enocpu_id - 1);
+ }
return;
+fatal_errors:
+ /* fatal errors include bad alignment, etc */
+ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+ panic("Unexpected SUN4V mondo error %lu\n", status);
+
fatal_mondo_timeout:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
- " progress after %d retries.\n",
- this_cpu, retries);
- goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
- this_cpu, status);
- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
- "mondo_block_pa(%lx)\n",
- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
- for (i = 0; i < cnt; i++)
- printk("%u ", cpu_list[i]);
- printk("]\n");
+ /* Some cpus were non-responsive to the cpu mondo. */
+ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+ panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
@@ -821,13 +869,17 @@ void arch_send_call_function_single_ipi(int cpu)
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ irq_enter();
generic_smp_call_function_interrupt();
+ irq_exit();
}
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ irq_enter();
generic_smp_call_function_single_interrupt();
+ irq_exit();
}
static void tsb_sync(void *info)
@@ -867,25 +919,26 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;
-#ifdef CONFIG_DEBUG_DCFLUSH
-extern atomic_t dcpage_flushes;
-extern atomic_t dcpage_flushes_xcall;
-#endif
-
-static inline void __local_flush_dcache_page(struct page *page)
+static inline void __local_flush_dcache_folio(struct folio *folio)
{
+ unsigned int i, nr = folio_nr_pages(folio);
+
#ifdef DCACHE_ALIASING_POSSIBLE
- __flush_dcache_page(page_address(page),
+ for (i = 0; i < nr; i++)
+ __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
((tlb_type == spitfire) &&
- page_mapping(page) != NULL));
+ folio_flush_mapping(folio) != NULL));
#else
- if (page_mapping(page) != NULL &&
- tlb_type == spitfire)
- __flush_icache_page(__pa(page_address(page)));
+ if (folio_flush_mapping(folio) != NULL &&
+ tlb_type == spitfire) {
+ unsigned long pfn = folio_pfn(folio);
+ for (i = 0; i < nr; i++)
+ __flush_icache_page((pfn + i) * PAGE_SIZE);
+ }
#endif
}
-void smp_flush_dcache_page_impl(struct page *page, int cpu)
+void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
{
int this_cpu;
@@ -899,14 +952,14 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
this_cpu = get_cpu();
if (cpu == this_cpu) {
- __local_flush_dcache_page(page);
+ __local_flush_dcache_folio(folio);
} else if (cpu_online(cpu)) {
- void *pg_addr = page_address(page);
+ void *pg_addr = folio_address(folio);
u64 data0 = 0;
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
- if (page_mapping(page) != NULL)
+ if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
@@ -914,18 +967,23 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
#endif
}
if (data0) {
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpumask_of(cpu));
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
- atomic_inc(&dcpage_flushes_xcall);
+ atomic_inc(&dcpage_flushes_xcall);
#endif
+ pg_addr += PAGE_SIZE;
+ }
}
}
put_cpu();
}
-void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
{
void *pg_addr;
u64 data0;
@@ -939,10 +997,10 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
atomic_inc(&dcpage_flushes);
#endif
data0 = 0;
- pg_addr = page_address(page);
+ pg_addr = folio_address(folio);
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
- if (page_mapping(page) != NULL)
+ if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
@@ -950,50 +1008,24 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
#endif
}
if (data0) {
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpu_online_mask);
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
- atomic_inc(&dcpage_flushes_xcall);
+ atomic_inc(&dcpage_flushes_xcall);
#endif
+ pg_addr += PAGE_SIZE;
+ }
}
- __local_flush_dcache_page(page);
+ __local_flush_dcache_folio(folio);
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
{
smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
@@ -1018,38 +1050,9 @@ void smp_fetch_global_pmu(void)
* are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
- * The SMP TLB coherency scheme we use works as follows:
- *
- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
- * space has (potentially) executed on, this is the heuristic
- * we use to avoid doing cross calls.
- *
- * Also, for flushing from kswapd and also for clones, we
- * use cpu_vm_mask as the list of cpus to make run the TLB.
- *
- * 2) TLB context numbers are shared globally across all processors
- * in the system, this allows us to play several games to avoid
- * cross calls.
- *
- * One invariant is that when a cpu switches to a process, and
- * that processes tsk->active_mm->cpu_vm_mask does not have the
- * current cpu's bit set, that tlb context is flushed locally.
- *
- * If the address space is non-shared (ie. mm->count == 1) we avoid
- * cross calls when we want to flush the currently running process's
- * tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->mm->cpu_vm_mask and performing the
- * flush locally only. This will force any subsequent cpus which run
- * this task to flush the context from the local tlb if the process
- * migrates to another cpu (again).
- *
- * 3) For shared address spaces (threads) and swapping we bite the
- * bullet for most cases and perform the cross call (but only to
- * the cpus listed in cpu_vm_mask).
- *
- * The performance gain from "optimizing" away the cross call for threads is
- * questionable (in theory the big win for threads is the massive sharing of
- * address space state across processors).
+ * mm->cpu_vm_mask is a bit mask of which cpus an address
+ * space has (potentially) executed on; this is the heuristic
+ * we use to limit cross calls.
*/
/* This currently is only used by the hugetlb arch pre-fault
@@ -1059,18 +1062,13 @@ void smp_fetch_global_pmu(void)
void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
- if (atomic_read(&mm->mm_users) == 1) {
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- goto local_flush_and_out;
- }
+ get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_mm,
ctx, 0, 0,
mm_cpumask(mm));
-local_flush_and_out:
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
put_cpu();
@@ -1093,17 +1091,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
{
u32 ctx = CTX_HWBITS(mm->context);
struct tlb_pending_info info;
- int cpu = get_cpu();
+
+ get_cpu();
info.ctx = ctx;
info.nr = nr;
info.vaddrs = vaddrs;
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- else
- smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
- &info, 1);
+ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+ &info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
@@ -1113,14 +1109,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long context = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- else
- smp_cross_call_masked(&xcall_flush_tlb_page,
- context, vaddr, 0,
- mm_cpumask(mm));
+ get_cpu();
+
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ context, vaddr, 0,
+ mm_cpumask(mm));
+
__flush_tlb_page(context, vaddr);
put_cpu();
@@ -1148,7 +1143,7 @@ static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
- int result = atomic_add_ret(1, &smp_capture_depth);
+ int result = atomic_add_return(1, &smp_capture_depth);
if (result == 1) {
int ncpus = num_online_cpus();
@@ -1205,20 +1200,10 @@ void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-/* /proc/profile writes can call this, don't __init it please. */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
-void smp_prepare_boot_cpu(void)
-{
-}
-
void __init smp_setup_processor_id(void)
{
if (tlb_type == spitfire)
@@ -1249,6 +1234,19 @@ void smp_fill_in_sib_core_maps(void)
}
}
+ for_each_present_cpu(i) {
+ unsigned int j;
+
+ for_each_present_cpu(j) {
+ if (cpu_data(i).max_cache_id ==
+ cpu_data(j).max_cache_id)
+ cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
+
+ if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+ cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+ }
+ }
+
for_each_present_cpu(i) {
unsigned int j;
@@ -1266,7 +1264,7 @@ void smp_fill_in_sib_core_maps(void)
}
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = smp_boot_one_cpu(cpu, tidle);
@@ -1393,13 +1391,88 @@ void __cpu_die(unsigned int cpu)
void __init smp_cpus_done(unsigned int max_cpus)
{
- pcr_arch_init();
}
-void smp_send_reschedule(int cpu)
+static void send_cpu_ipi(int cpu)
{
- xcall_deliver((u64) &xcall_receive_signal, 0, 0,
- cpumask_of(cpu));
+ xcall_deliver((u64) &xcall_receive_signal,
+ 0, 0, cpumask_of(cpu));
+}
+
+void scheduler_poke(void)
+{
+ if (!cpu_poke)
+ return;
+
+ if (!__this_cpu_read(poke))
+ return;
+
+ __this_cpu_write(poke, false);
+ set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+}
+
+static unsigned long send_cpu_poke(int cpu)
+{
+ unsigned long hv_err;
+
+ per_cpu(poke, cpu) = true;
+ hv_err = sun4v_cpu_poke(cpu);
+ if (hv_err != HV_EOK) {
+ per_cpu(poke, cpu) = false;
+ pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
+ __func__, hv_err);
+ }
+
+ return hv_err;
+}
+
+void arch_smp_send_reschedule(int cpu)
+{
+ if (cpu == smp_processor_id()) {
+ WARN_ON_ONCE(preemptible());
+ set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+ return;
+ }
+
+ /* Use cpu poke to resume idle cpu if supported. */
+ if (cpu_poke && idle_cpu(cpu)) {
+ unsigned long ret;
+
+ ret = send_cpu_poke(cpu);
+ if (ret == HV_EOK)
+ return;
+ }
+
+ /* Use an IPI in the following cases:
+ * - cpu poke not supported
+ * - cpu not idle
+ * - send_cpu_poke() returns with error
+ */
+ send_cpu_ipi(cpu);
+}
+
+void smp_init_cpu_poke(void)
+{
+ unsigned long major;
+ unsigned long minor;
+ int ret;
+
+ if (tlb_type != hypervisor)
+ return;
+
+ ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
+ if (ret) {
+ pr_debug("HV_GRP_CORE is not registered\n");
+ return;
+ }
+
+ if (major == 1 && minor >= 6) {
+ /* CPU POKE is registered. */
+ cpu_poke = true;
+ return;
+ }
+
+ pr_debug("CPU_POKE not supported\n");
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
@@ -1408,55 +1481,39 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
scheduler_ipi();
}
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
-void smp_send_stop(void)
+static void stop_this_cpu(void *dummy)
{
+ set_cpu_online(smp_processor_id(), false);
+ prom_stopself();
}
-/**
- * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
- * @cpu: cpu to allocate for
- * @size: size allocation in bytes
- * @align: alignment
- *
- * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
- * does the right thing for NUMA regardless of the current
- * configuration.
- *
- * RETURNS:
- * Pointer to the allocated area on success, NULL on failure.
- */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
- size_t align)
+void smp_send_stop(void)
{
- const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- int node = cpu_to_node(cpu);
- void *ptr;
-
- if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem(size, align, goal);
- pr_info("cpu %d has no node %d or node-local memory\n",
- cpu, node);
- pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
- cpu, size, __pa(ptr));
- } else {
- ptr = __alloc_bootmem_node(NODE_DATA(node),
- size, align, goal);
- pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
- "%016lx\n", cpu, size, node, __pa(ptr));
- }
- return ptr;
-#else
- return __alloc_bootmem(size, align, goal);
+ int cpu;
+
+ if (tlb_type == hypervisor) {
+ int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+ sunhv_migrate_hvcons_irq(this_cpu);
#endif
-}
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
-static void __init pcpu_free_bootmem(void *ptr, size_t size)
-{
- free_bootmem(__pa(ptr), size);
+ set_cpu_online(cpu, false);
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+ unsigned long hv_err;
+ hv_err = sun4v_cpu_stop(cpu);
+ if (hv_err)
+ printk(KERN_ERR "sun4v_cpu_stop() "
+ "failed err=%lu\n", hv_err);
+ } else
+#endif
+ prom_stopcpu_cpuid(cpu);
+ }
+ } else
+ smp_call_function(stop_this_cpu, NULL, 0);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
@@ -1467,27 +1524,9 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE;
}
-static void __init pcpu_populate_pte(unsigned long addr)
+static int __init pcpu_cpu_to_node(int cpu)
{
- pgd_t *pgd = pgd_offset_k(addr);
- pud_t *pud;
- pmd_t *pmd;
-
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
- }
+ return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
@@ -1500,18 +1539,15 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_cpu_distance,
- pcpu_alloc_bootmem,
- pcpu_free_bootmem);
+ pcpu_cpu_to_node);
if (rc)
- pr_warning("PERCPU: %s allocator failed (%d), "
- "falling back to page size\n",
- pcpu_fc_names[pcpu_chosen_fc], rc);
+ pr_warn("PERCPU: %s allocator failed (%d), "
+ "falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
- pcpu_alloc_bootmem,
- pcpu_free_bootmem,
- pcpu_populate_pte);
+ pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 000000000000..09aa69e422e5
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,12 @@
+/*
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+
+/* This is needed only for drivers/sbus/char/openprom.c */
+EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
deleted file mode 100644
index e521c54560f9..000000000000
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/head.h>
-#include <asm/dma.h>
-
-struct poll {
- int fd;
- short events;
- short revents;
-};
-
-/* from entry.S */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-
-/* from head_32.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(empty_zero_page);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
deleted file mode 100644
index 9f5e24ddcc70..000000000000
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
- *
- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <asm/cpudata.h>
-#include <asm/uaccess.h>
-#include <asm/spitfire.h>
-#include <asm/oplib.h>
-#include <asm/hypervisor.h>
-#include <asm/cacheflush.h>
-
-struct poll {
- int fd;
- short events;
- short revents;
-};
-
-/* from helpers.S */
-EXPORT_SYMBOL(__flushw_user);
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
-
-/* from head_64.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(tlb_type);
-EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(prom_root_node);
-
-/* from hvcalls.S */
-EXPORT_SYMBOL(sun4v_niagara_getperf);
-EXPORT_SYMBOL(sun4v_niagara_setperf);
-EXPORT_SYMBOL(sun4v_niagara2_getperf);
-EXPORT_SYMBOL(sun4v_niagara2_setperf);
-
-/* from hweight.S */
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-
-/* from ffs_ffz.S */
-EXPORT_SYMBOL(ffs);
-EXPORT_SYMBOL(__ffs);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index c357e40ffd01..5427af44099a 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* We need to carefully read the error status, ACK the errors,
* prevent recursive traps, and pass the information on to C
* code for logging.
@@ -85,7 +86,7 @@ __spitfire_cee_trap_continue:
ba,pt %xcc, etraptl1
rd %pc, %g7
- ba,pt %xcc, 2f
+ ba,a,pt %xcc, 2f
nop
1: ba,pt %xcc, etrap_irq
@@ -100,8 +101,7 @@ __spitfire_cee_trap_continue:
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_access_error,.-__spitfire_access_error
/* This is the trap handler entry point for ECC correctable
@@ -179,8 +179,7 @@ __spitfire_data_access_exception_tl1:
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
.type __spitfire_data_access_exception,#function
@@ -200,8 +199,7 @@ __spitfire_data_access_exception:
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception,.-__spitfire_data_access_exception
.type __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +218,7 @@ __spitfire_insn_access_exception_tl1:
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
.type __spitfire_insn_access_exception,#function
@@ -240,6 +237,5 @@ __spitfire_insn_access_exception:
mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index c59af546f522..3bcc4ddc6911 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sstate.c: System soft state support.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
@@ -5,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
@@ -43,8 +45,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
- "Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+ "Linux panicking";
static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{
@@ -76,7 +78,7 @@ static struct notifier_block sstate_reboot_notifier = {
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
- do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
return NOTIFY_DONE;
}
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index e78386a0029f..d8eb1d149f9f 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -1,4 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/sched.h>
+#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
@@ -56,9 +58,11 @@ static void __save_stack_trace(struct thread_info *tp,
trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = t->curr_ret_stack;
- if (t->ret_stack && index >= graph) {
- pc = t->ret_stack[index - graph].ret;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(t,
+ graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
if (trace->nr_entries <
trace->max_entries)
trace->entries[trace->nr_entries++] = pc;
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a566bb8..b8cd57d9182b 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* starfire.c: Starfire/E10000 support.
*
@@ -28,11 +29,6 @@ void check_if_starfire(void)
this_is_starfire = 1;
}
-int starfire_hard_smp_processor_id(void)
-{
- return upa_readl(0x1fff40000d0UL);
-}
-
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index f8933be3ca8b..9a137c70e8d1 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SS1000/SC2000 interrupt handling.
*
@@ -143,7 +144,7 @@ static void sun4d_sbus_handler_irq(int sbusl)
}
}
-void sun4d_handler_irq(int pil, struct pt_regs *regs)
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
/* SBUS IRQ level (1 - 7) */
@@ -188,7 +189,7 @@ void sun4d_handler_irq(int pil, struct pt_regs *regs)
static void sun4d_mask_irq(struct irq_data *data)
{
- struct sun4d_handler_data *handler_data = data->handler_data;
+ struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
@@ -206,7 +207,7 @@ static void sun4d_mask_irq(struct irq_data *data)
static void sun4d_unmask_irq(struct irq_data *data)
{
- struct sun4d_handler_data *handler_data = data->handler_data;
+ struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
@@ -236,7 +237,7 @@ static void sun4d_shutdown_irq(struct irq_data *data)
irq_unlink(data->irq);
}
-struct irq_chip sun4d_irq = {
+static struct irq_chip sun4d_irq = {
.name = "sun4d",
.irq_startup = sun4d_startup_irq,
.irq_shutdown = sun4d_shutdown_irq,
@@ -285,9 +286,9 @@ static void __init sun4d_load_profile_irqs(void)
}
}
-unsigned int _sun4d_build_device_irq(unsigned int real_irq,
- unsigned int pil,
- unsigned int board)
+static unsigned int _sun4d_build_device_irq(unsigned int real_irq,
+ unsigned int pil,
+ unsigned int board)
{
struct sun4d_handler_data *handler_data;
unsigned int irq;
@@ -320,8 +321,8 @@ err_out:
-unsigned int sun4d_build_device_irq(struct platform_device *op,
- unsigned int real_irq)
+static unsigned int sun4d_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
{
struct device_node *dp = op->dev.of_node;
struct device_node *board_parent, *bus = dp->parent;
@@ -334,12 +335,12 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
irq = real_irq;
while (bus) {
- if (!strcmp(bus->name, "sbi")) {
+ if (of_node_name_eq(bus, "sbi")) {
bus_connection = "io-unit";
break;
}
- if (!strcmp(bus->name, "bootbus")) {
+ if (of_node_name_eq(bus, "bootbus")) {
bus_connection = "cpu-unit";
break;
}
@@ -359,16 +360,16 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
* If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit
* lacks a "board#" property, something is very wrong.
*/
- if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
- printk(KERN_ERR "%s: Error, parent is not %s.\n",
- bus->full_name, bus_connection);
+ if (!of_node_name_eq(bus->parent, bus_connection)) {
+ printk(KERN_ERR "%pOF: Error, parent is not %s.\n",
+ bus, bus_connection);
goto err_out;
}
board_parent = bus->parent;
board = of_getintprop_default(board_parent, "board#", -1);
if (board == -1) {
- printk(KERN_ERR "%s: Error, lacks board# property.\n",
- board_parent->full_name);
+ printk(KERN_ERR "%pOF: Error, lacks board# property.\n",
+ board_parent);
goto err_out;
}
@@ -383,7 +384,8 @@ err_out:
return irq;
}
-unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
+static unsigned int sun4d_build_timer_irq(unsigned int board,
+ unsigned int real_irq)
{
return _sun4d_build_device_irq(real_irq, real_irq, board);
}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index c9eb82f23d92..9a62a5cf3337 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -10,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
@@ -50,7 +51,7 @@ static inline void show_leds(int cpuid)
"i" (ASI_M_CTL));
}
-void __cpuinit sun4d_cpu_pre_starting(void *arg)
+void sun4d_cpu_pre_starting(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -62,7 +63,7 @@ void __cpuinit sun4d_cpu_pre_starting(void *arg)
cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
}
-void __cpuinit sun4d_cpu_pre_online(void *arg)
+void sun4d_cpu_pre_online(void *arg)
{
unsigned long flags;
int cpuid;
@@ -93,7 +94,7 @@ void __cpuinit sun4d_cpu_pre_online(void *arg)
show_leds(cpuid);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
local_ops->cache_all();
@@ -118,7 +119,7 @@ void __init smp4d_boot_cpus(void)
local_ops->cache_all();
}
-int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle)
+int smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4d_cpu_startup;
int timeout;
@@ -204,7 +205,7 @@ static void __init smp4d_ipi_init(void)
void sun4d_ipi_interrupt(void)
{
- struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
+ struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
if (work->single) {
work->single = 0;
@@ -267,7 +268,7 @@ static void sun4d_ipi_resched(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -280,7 +281,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -295,7 +296,7 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
* If you make changes here, make sure
* gcc generates proper code...
*/
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -352,11 +353,13 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = hard_smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
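The sun4d (and matching sun4m) cross-call hunks store the handler as a plain void pointer and have the receiving cpu cast it back to the five-argument function type before calling it. A minimal sketch of that call-through-untyped-pointer pattern follows; the struct and names are simplified stand-ins for ccall_info, and the void-pointer/function-pointer conversion relies on the same GNU/POSIX extension the kernel code does.

#include <stdio.h>

typedef void (*xcall_fn)(unsigned long, unsigned long, unsigned long,
			 unsigned long, unsigned long);

/* Stand-in for the cross-call descriptor: an untyped pointer plus arguments. */
struct funcall {
	void *func;
	unsigned long arg1, arg2, arg3, arg4, arg5;
};

static void demo_xcall(unsigned long a, unsigned long b, unsigned long c,
		       unsigned long d, unsigned long e)
{
	printf("xcall(%lu, %lu, %lu, %lu, %lu)\n", a, b, c, d, e);
}

/* The receiving cpu assigns the typed pointer once, then calls through it. */
static void run_cross_call(const struct funcall *ci)
{
	xcall_fn func = (xcall_fn)ci->func;

	func(ci->arg1, ci->arg2, ci->arg3, ci->arg4, ci->arg5);
}

int main(void)
{
	struct funcall ci = { (void *)demo_xcall, 1, 2, 3, 4, 5 };

	run_cross_call(&ci);
	return 0;
}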
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index c5ade9d27a1d..1079638986b5 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sun4m irq support
*
@@ -9,10 +10,12 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
+#include <linux/slab.h>
+#include <linux/sched/debug.h>
+#include <linux/pgtable.h>
+
#include <asm/timer.h>
#include <asm/traps.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
@@ -186,9 +189,10 @@ static unsigned long sun4m_imask[0x50] = {
static void sun4m_mask_irq(struct irq_data *data)
{
- struct sun4m_handler_data *handler_data = data->handler_data;
+ struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
+ handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
@@ -204,9 +208,10 @@ static void sun4m_mask_irq(struct irq_data *data)
static void sun4m_unmask_irq(struct irq_data *data)
{
- struct sun4m_handler_data *handler_data = data->handler_data;
+ struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
+ handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 8a65f158153d..056df034e79e 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sun4m SMP support.
*
@@ -8,7 +9,7 @@
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
@@ -34,11 +35,11 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
return val;
}
-void __cpuinit sun4m_cpu_pre_starting(void *arg)
+void sun4m_cpu_pre_starting(void *arg)
{
}
-void __cpuinit sun4m_cpu_pre_online(void *arg)
+void sun4m_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
@@ -59,7 +60,7 @@ void __cpuinit sun4m_cpu_pre_online(void *arg)
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
@@ -75,7 +76,7 @@ void __init smp4m_boot_cpus(void)
local_ops->cache_all();
}
-int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle)
+int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4m_cpu_startup;
int timeout;
@@ -156,7 +157,7 @@ static void sun4m_ipi_mask_one(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -169,7 +170,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -229,11 +230,13 @@ static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
@@ -247,7 +250,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
ce = &per_cpu(sparc32_clockevent, cpu);
- if (ce->mode & CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(ce))
sun4m_clear_profile_irq(cpu);
else
sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..6478ef4f6a15 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_ivec.S: Sun4v interrupt vector handling.
*
* Copyright (C) 2006 <davem@davemloft.net>
@@ -26,6 +27,21 @@ sun4v_cpu_mondo:
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ /* Get smp_processor_id() into %g3 */
+ sethi %hi(trap_block), %g5
+ or %g5, %lo(trap_block), %g5
+ sub %g4, %g5, %g3
+ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+ /* Increment cpu_mondo_counter[smp_processor_id()] */
+ sethi %hi(cpu_mondo_counter), %g5
+ or %g5, %lo(cpu_mondo_counter), %g5
+ sllx %g3, 3, %g3
+ add %g5, %g3, %g5
+ ldx [%g5], %g3
+ add %g3, 1, %g3
+ stx %g3, [%g5]
+
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
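
The new lines in sun4v_cpu_mondo derive smp_processor_id() from the byte offset of this CPU's trap_block entry (shifted by TRAP_BLOCK_SZ_SHIFT) and then bump cpu_mondo_counter[cpu]; the sllx by 3 is the *8 scaling for a 64-bit counter array. A rough, compilable C rendering of that arithmetic; the struct size and shift value below are demo assumptions, not the kernel's real layout:

#include <stdint.h>
#include <stdio.h>

#define TRAP_BLOCK_SZ_SHIFT 7		/* assumed: 128-byte entries for the demo */
struct trap_per_cpu { unsigned char pad[1 << TRAP_BLOCK_SZ_SHIFT]; };

static struct trap_per_cpu trap_block[4];	/* stand-in for the kernel array */
static uint64_t cpu_mondo_counter[4];

int main(void)
{
	/* In the handler a scratchpad register already points at this CPU's
	 * trap_block entry; here we just pick one. */
	struct trap_per_cpu *tb = &trap_block[2];

	/* Byte difference from trap_block[0], shifted right by the entry
	 * size, gives the CPU number -- exactly what the sub/srlx pair does. */
	unsigned long cpu = ((unsigned long)tb - (unsigned long)trap_block)
			    >> TRAP_BLOCK_SZ_SHIFT;

	/* Then the per-CPU mondo counter is incremented. */
	cpu_mondo_counter[cpu]++;

	printf("cpu %lu, counter %llu\n", cpu,
	       (unsigned long long)cpu_mondo_counter[cpu]);
	return 0;
}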
diff --git a/arch/sparc/kernel/sun4v_mcd.S b/arch/sparc/kernel/sun4v_mcd.S
new file mode 100644
index 000000000000..a419b7318406
--- /dev/null
+++ b/arch/sparc/kernel/sun4v_mcd.S
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* sun4v_mcd.S: Sun4v memory corruption detected precise exception handler
+ *
+ * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
+ * Authors: Bob Picco <bob.picco@oracle.com>,
+ * Khalid Aziz <khalid.aziz@oracle.com>
+ */
+ .text
+ .align 32
+
+sun4v_mcd_detect_precise:
+ mov %l4, %o1
+ mov %l5, %o2
+ call sun4v_mem_corrupt_detect_precise
+ add %sp, PTREGS_OFF, %o0
+ ba,a,pt %xcc, rtrap
+ nop
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index bde867fd71e8..7ac9f3367674 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
*
* Copyright (C) 2006 <davem@davemloft.net>
@@ -182,7 +183,7 @@ sun4v_tsb_miss_common:
cmp %g5, -1
be,pt %xcc, 80f
nop
- COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+ COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
/* That clobbered %g2, reload it. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
@@ -195,6 +196,11 @@ sun4v_tsb_miss_common:
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
sun4v_itlb_error:
+ rdpr %tl, %g1
+ cmp %g1, 1
+ ble,pt %icc, sun4v_bad_ra
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
+
sethi %hi(sun4v_err_itlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
sethi %hi(sun4v_err_itlb_ctx), %g1
@@ -206,15 +212,10 @@ sun4v_itlb_error:
sethi %hi(sun4v_err_itlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
+ sethi %hi(1f), %g7
rdpr %tl, %g4
- cmp %g4, 1
- ble,pt %icc, 1f
- sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
- or %g7, %lo(2f), %g7
-
-1: ba,pt %xcc, etrap
-2: or %g7, %lo(2b), %g7
+1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_itlb_error_report
add %sp, PTREGS_OFF, %o0
@@ -222,6 +223,11 @@ sun4v_itlb_error:
/* NOTREACHED */
sun4v_dtlb_error:
+ rdpr %tl, %g1
+ cmp %g1, 1
+ ble,pt %icc, sun4v_bad_ra
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
+
sethi %hi(sun4v_err_dtlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
sethi %hi(sun4v_err_dtlb_ctx), %g1
@@ -233,21 +239,23 @@ sun4v_dtlb_error:
sethi %hi(sun4v_err_dtlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
+ sethi %hi(1f), %g7
rdpr %tl, %g4
- cmp %g4, 1
- ble,pt %icc, 1f
- sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
- or %g7, %lo(2f), %g7
-
-1: ba,pt %xcc, etrap
-2: or %g7, %lo(2b), %g7
+1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_dtlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
+sun4v_bad_ra:
+ or %g0, %g4, %g5
+ ba,pt %xcc, sparc64_realfault_common
+ or %g1, %g0, %g4
+
+ /* NOTREACHED */
+
/* Instruction Access Exception, tl0. */
sun4v_iacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
@@ -345,6 +353,7 @@ sun4v_mna:
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
+ nop
/* Privileged Action. */
sun4v_privact:
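
The added prologues in sun4v_itlb_error/sun4v_dtlb_error check the trap level first: at TL<=1 they load FAULT_CODE_BAD_RA | FAULT_CODE_{I,D}TLB and branch to the new sun4v_bad_ra stub, which feeds the address and fault code to sparc64_realfault_common instead of the fatal error reporter. A C-flavoured sketch of that control flow, with stub functions and assumed fault-code values (not the kernel's definitions):

#include <stdio.h>

#define FAULT_CODE_ITLB   0x01		/* values assumed for the demo */
#define FAULT_CODE_BAD_RA 0x20

static void realfault(unsigned long addr, unsigned int code)	/* ~ sparc64_realfault_common */
{
	printf("realfault addr=%#lx code=%#x\n", addr, code);
}

static void fatal_itlb_report(unsigned long addr)		/* ~ sun4v_itlb_error_report */
{
	printf("fatal itlb error at %#lx\n", addr);
}

static void itlb_error(unsigned long trap_level, unsigned long vaddr)
{
	if (trap_level <= 1) {
		/* New: a bad real address taken at TL<=1 is routed through
		 * the ordinary fault path rather than the fatal reporter. */
		realfault(vaddr, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB);
		return;
	}
	fatal_itlb_report(vaddr);
}

int main(void)
{
	itlb_error(1, 0x12345000UL);
	itlb_error(2, 0x12345000UL);
	return 0;
}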
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f7c72b6efc27..a3d308f2043e 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* sys32.S: I-cache tricks for 32-bit compatibility layer simple
* conversions.
@@ -12,266 +13,8 @@
.text
-#define SIGN1(STUB,SYSCALL,REG1) \
- .align 32; \
- .globl STUB; \
-STUB: sethi %hi(SYSCALL), %g1; \
- jmpl %g1 + %lo(SYSCALL), %g0; \
- sra REG1, 0, REG1
-
-#define SIGN2(STUB,SYSCALL,REG1,REG2) \
- .align 32; \
- .globl STUB; \
-STUB: sethi %hi(SYSCALL), %g1; \
- sra REG1, 0, REG1; \
- jmpl %g1 + %lo(SYSCALL), %g0; \
- sra REG2, 0, REG2
-
-#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
- .align 32; \
- .globl STUB; \
-STUB: sra REG1, 0, REG1; \
- sethi %hi(SYSCALL), %g1; \
- sra REG2, 0, REG2; \
- jmpl %g1 + %lo(SYSCALL), %g0; \
- sra REG3, 0, REG3
-
-SIGN1(sys32_readahead, compat_sys_readahead, %o0)
-SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
-SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
-SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
-SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
-SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
-SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
-SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
-SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
-SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
-SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
-
.globl sys32_mmap2
sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
-
- .align 32
- .globl sys32_socketcall
-sys32_socketcall: /* %o0=call, %o1=args */
- cmp %o0, 1
- bl,pn %xcc, do_einval
- cmp %o0, 18
- bg,pn %xcc, do_einval
- sub %o0, 1, %o0
- sllx %o0, 5, %o0
- sethi %hi(__socketcall_table_begin), %g2
- or %g2, %lo(__socketcall_table_begin), %g2
- jmpl %g2 + %o0, %g0
- nop
-do_einval:
- retl
- mov -EINVAL, %o0
-
- .align 32
-__socketcall_table_begin:
-
- /* Each entry is exactly 32 bytes. */
-do_sys_socket: /* sys_socket(int, int, int) */
-1: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socket), %g1
-2: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_socket), %g0
-3: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
-4: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_bind), %g1
-5: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_bind), %g0
-6: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
-7: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_connect), %g1
-8: ldswa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_connect), %g0
-9: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_listen: /* sys_listen(int, int) */
-10: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_listen), %g1
- jmpl %g1 + %lo(sys_listen), %g0
-11: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
-12: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept), %g1
-13: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_accept), %g0
-14: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
-15: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getsockname), %g1
-16: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getsockname), %g0
-17: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
-18: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_getpeername), %g1
-19: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(sys_getpeername), %g0
-20: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
-21: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_socketpair), %g1
-22: ldswa [%o1 + 0x8] %asi, %o2
-23: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_socketpair), %g0
-24: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
-25: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_send), %g1
-26: lduwa [%o1 + 0x8] %asi, %o2
-27: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_send), %g0
-28: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
-29: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recv), %g1
-30: lduwa [%o1 + 0x8] %asi, %o2
-31: lduwa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_recv), %g0
-32: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
-33: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_sendto), %g1
-34: lduwa [%o1 + 0x8] %asi, %o2
-35: lduwa [%o1 + 0xc] %asi, %o3
-36: lduwa [%o1 + 0x10] %asi, %o4
-37: ldswa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_sendto), %g0
-38: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
-39: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_recvfrom), %g1
-40: lduwa [%o1 + 0x8] %asi, %o2
-41: lduwa [%o1 + 0xc] %asi, %o3
-42: lduwa [%o1 + 0x10] %asi, %o4
-43: lduwa [%o1 + 0x14] %asi, %o5
- jmpl %g1 + %lo(sys_recvfrom), %g0
-44: lduwa [%o1 + 0x4] %asi, %o1
-do_sys_shutdown: /* sys_shutdown(int, int) */
-45: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_shutdown), %g1
- jmpl %g1 + %lo(sys_shutdown), %g0
-46: ldswa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
- nop
-do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
-47: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_setsockopt), %g1
-48: ldswa [%o1 + 0x8] %asi, %o2
-49: lduwa [%o1 + 0xc] %asi, %o3
-50: ldswa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(compat_sys_setsockopt), %g0
-51: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
-52: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_getsockopt), %g1
-53: ldswa [%o1 + 0x8] %asi, %o2
-54: lduwa [%o1 + 0xc] %asi, %o3
-55: lduwa [%o1 + 0x10] %asi, %o4
- jmpl %g1 + %lo(compat_sys_getsockopt), %g0
-56: ldswa [%o1 + 0x4] %asi, %o1
- nop
-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
-57: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_sendmsg), %g1
-58: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_sendmsg), %g0
-59: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
-60: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(compat_sys_recvmsg), %g1
-61: lduwa [%o1 + 0x8] %asi, %o2
- jmpl %g1 + %lo(compat_sys_recvmsg), %g0
-62: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
- nop
-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
-63: ldswa [%o1 + 0x0] %asi, %o0
- sethi %hi(sys_accept4), %g1
-64: lduwa [%o1 + 0x8] %asi, %o2
-65: ldswa [%o1 + 0xc] %asi, %o3
- jmpl %g1 + %lo(sys_accept4), %g0
-66: lduwa [%o1 + 0x4] %asi, %o1
- nop
- nop
-
- .section __ex_table,"a"
- .align 4
- .word 1b, __retl_efault, 2b, __retl_efault
- .word 3b, __retl_efault, 4b, __retl_efault
- .word 5b, __retl_efault, 6b, __retl_efault
- .word 7b, __retl_efault, 8b, __retl_efault
- .word 9b, __retl_efault, 10b, __retl_efault
- .word 11b, __retl_efault, 12b, __retl_efault
- .word 13b, __retl_efault, 14b, __retl_efault
- .word 15b, __retl_efault, 16b, __retl_efault
- .word 17b, __retl_efault, 18b, __retl_efault
- .word 19b, __retl_efault, 20b, __retl_efault
- .word 21b, __retl_efault, 22b, __retl_efault
- .word 23b, __retl_efault, 24b, __retl_efault
- .word 25b, __retl_efault, 26b, __retl_efault
- .word 27b, __retl_efault, 28b, __retl_efault
- .word 29b, __retl_efault, 30b, __retl_efault
- .word 31b, __retl_efault, 32b, __retl_efault
- .word 33b, __retl_efault, 34b, __retl_efault
- .word 35b, __retl_efault, 36b, __retl_efault
- .word 37b, __retl_efault, 38b, __retl_efault
- .word 39b, __retl_efault, 40b, __retl_efault
- .word 41b, __retl_efault, 42b, __retl_efault
- .word 43b, __retl_efault, 44b, __retl_efault
- .word 45b, __retl_efault, 46b, __retl_efault
- .word 47b, __retl_efault, 48b, __retl_efault
- .word 49b, __retl_efault, 50b, __retl_efault
- .word 51b, __retl_efault, 52b, __retl_efault
- .word 53b, __retl_efault, 54b, __retl_efault
- .word 55b, __retl_efault, 56b, __retl_efault
- .word 57b, __retl_efault, 58b, __retl_efault
- .word 59b, __retl_efault, 60b, __retl_efault
- .word 61b, __retl_efault, 62b, __retl_efault
- .word 63b, __retl_efault, 64b, __retl_efault
- .word 65b, __retl_efault, 66b, __retl_efault
- .previous
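
The removed SIGN1/SIGN2/SIGN3 stubs existed to sign-extend selected 32-bit arguments with "sra reg, 0, reg" before tail-calling the compat handler. In C terms that extension is the following; sign_extend_arg() is an illustrative name, not a kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Treat the low 32 bits of an incoming register as a signed int and
 * sign-extend it to 64 bits, which is what "sra reg, 0, reg" did. */
static long sign_extend_arg(unsigned long reg)
{
	return (long)(int32_t)(uint32_t)reg;
}

int main(void)
{
	/* A 32-bit task passing fd = -1 may reach the 64-bit kernel as
	 * 0x00000000ffffffff unless someone sign-extends it. */
	unsigned long raw = 0xffffffffUL;

	printf("raw           = %#lx\n", raw);
	printf("sign-extended = %ld\n", sign_extend_arg(raw));
	return 0;
}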
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 3d0ddbc005fe..f84a02ab6bf9 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -21,7 +22,6 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
@@ -44,25 +44,21 @@
#include <linux/slab.h>
#include <asm/types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/mmu_context.h>
#include <asm/compat_signal.h>
-asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
+#include "systbls.h"
+
+COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, path, u32, high, u32, low)
{
- if ((int)high < 0)
- return -EINVAL;
- else
- return sys_truncate(path, (high << 32) | low);
+ return ksys_truncate(path, ((u64)high << 32) | low);
}
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, u32, high, u32, low)
{
- if ((int)high < 0)
- return -EINVAL;
- else
- return sys_ftruncate(fd, (high << 32) | low);
+ return ksys_ftruncate(fd, ((u64)high << 32) | low);
}
static int cp_compat_stat64(struct kstat *stat,
@@ -95,8 +91,8 @@ static int cp_compat_stat64(struct kstat *stat,
return err;
}
-asmlinkage long compat_sys_stat64(const char __user * filename,
- struct compat_stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(stat64, const char __user *, filename,
+ struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
@@ -106,8 +102,8 @@ asmlinkage long compat_sys_stat64(const char __user * filename,
return error;
}
-asmlinkage long compat_sys_lstat64(const char __user * filename,
- struct compat_stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(lstat64, const char __user *, filename,
+ struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
@@ -117,8 +113,8 @@ asmlinkage long compat_sys_lstat64(const char __user * filename,
return error;
}
-asmlinkage long compat_sys_fstat64(unsigned int fd,
- struct compat_stat64 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(fstat64, unsigned int, fd,
+ struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
@@ -128,9 +124,9 @@ asmlinkage long compat_sys_fstat64(unsigned int fd,
return error;
}
-asmlinkage long compat_sys_fstatat64(unsigned int dfd,
- const char __user *filename,
- struct compat_stat64 __user * statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(fstatat64, unsigned int, dfd,
+ const char __user *, filename,
+ struct compat_stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
@@ -157,7 +153,6 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
{
struct k_sigaction new_ka, old_ka;
int ret;
- compat_sigset_t set32;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
@@ -169,10 +164,9 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
- ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
- sigset_from_compat(&new_ka.sa.sa_mask, &set32);
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- ret |= __get_user(u_restorer, &act->sa_restorer);
+ ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
+ ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
if (ret)
return -EFAULT;
@@ -181,11 +175,11 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- sigset_to_compat(&set32, &old_ka.sa.sa_mask);
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
- ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+ ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
+ sizeof(oact->sa_mask));
+ ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
ret = -EFAULT;
}
@@ -193,62 +187,51 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
return ret;
}
-asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
- char __user *ubuf,
- compat_size_t count,
- unsigned long poshi,
- unsigned long poslo)
+COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, ubuf,
+ compat_size_t, count, u32, poshi, u32, poslo)
{
- return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
+ return ksys_pread64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
-asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
- char __user *ubuf,
- compat_size_t count,
- unsigned long poshi,
- unsigned long poslo)
+COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, char __user *, ubuf,
+ compat_size_t, count, u32, poshi, u32, poslo)
{
- return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
+ return ksys_pwrite64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
-asmlinkage long compat_sys_readahead(int fd,
- unsigned long offhi,
- unsigned long offlo,
- compat_size_t count)
+COMPAT_SYSCALL_DEFINE4(readahead, int, fd, u32, offhi, u32, offlo,
+ compat_size_t, count)
{
- return sys_readahead(fd, (offhi << 32) | offlo, count);
+ return ksys_readahead(fd, ((u64)offhi << 32) | offlo, count);
}
-long compat_sys_fadvise64(int fd,
- unsigned long offhi,
- unsigned long offlo,
- compat_size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(fadvise64, int, fd, u32, offhi, u32, offlo,
+ compat_size_t, len, int, advice)
{
- return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
+ return ksys_fadvise64_64(fd, ((u64)offhi << 32) | offlo, len, advice);
}
-long compat_sys_fadvise64_64(int fd,
- unsigned long offhi, unsigned long offlo,
- unsigned long lenhi, unsigned long lenlo,
- int advice)
+COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, u32, offhi, u32, offlo,
+ u32, lenhi, u32, lenlo, int, advice)
{
- return sys_fadvise64_64(fd,
- (offhi << 32) | offlo,
- (lenhi << 32) | lenlo,
- advice);
+ return ksys_fadvise64_64(fd,
+ ((u64)offhi << 32) | offlo,
+ ((u64)lenhi << 32) | lenlo,
+ advice);
}
-long sys32_sync_file_range(unsigned int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, unsigned int flags)
+COMPAT_SYSCALL_DEFINE6(sync_file_range, unsigned int, fd, u32, off_high, u32, off_low,
+ u32, nb_high, u32, nb_low, unsigned int, flags)
{
- return sys_sync_file_range(fd,
- (off_high << 32) | off_low,
- (nb_high << 32) | nb_low,
- flags);
+ return ksys_sync_file_range(fd,
+ ((u64)off_high << 32) | off_low,
+ ((u64)nb_high << 32) | nb_low,
+ flags);
}
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
- u32 lenhi, u32 lenlo)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, u32, offhi, u32, offlo,
+ u32, lenhi, u32, lenlo)
{
- return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
- ((loff_t)lenhi << 32) | lenlo);
+ return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+ ((loff_t)lenhi << 32) | lenlo);
}
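
Throughout sys_sparc32.c the wrappers now take the two 32-bit halves as u32 and rebuild the 64-bit value as ((u64)high << 32) | low; the old explicit "(int)high < 0 -> -EINVAL" check in truncate64/ftruncate64 disappears because the halves are combined as unsigned. A small standalone demo of why the cast before the shift matters; join64() is just an illustrative name:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t join64(uint32_t hi, uint32_t lo)
{
	/* Shifting a 32-bit value by 32 without widening is undefined in C;
	 * widening first gives the intended 64-bit result. */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0x80000000;

	printf("offset = %#" PRIx64 "\n", join64(hi, lo));	/* 0x180000000 */
	return 0;
}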
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 3a8d1844402e..fb31bc0c5b48 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -7,7 +8,9 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -20,29 +23,36 @@
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/ipc.h>
+#include <linux/hugetlb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unistd.h>
+#include "systbls.h"
+
/* #define DEBUG_UNIMP_SYSCALL */
/* XXX Make this per-binary type, this way we can detect the type of
* XXX a binary. Every Sparc executable calls this very early on.
*/
-asmlinkage unsigned long sys_getpagesize(void)
+SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;
+
+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -54,13 +64,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
if (!addr)
addr = TASK_UNMAPPED_BASE;
- info.flags = 0;
info.length = len;
info.low_limit = addr;
info.high_limit = TASK_SIZE;
- info.align_mask = (flags & MAP_SHARED) ?
- (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!file_hugepage) {
+ info.align_mask = (flags & MAP_SHARED) ?
+ (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ } else {
+ info.align_mask = huge_page_mask_align(filp);
+ }
return vm_unmapped_area(&info);
}
@@ -68,7 +81,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage int sparc_pipe(struct pt_regs *regs)
+SYSCALL_DEFINE0(sparc_pipe)
{
int fd[2];
int error;
@@ -76,7 +89,7 @@ asmlinkage int sparc_pipe(struct pt_regs *regs)
error = do_pipe_flags(fd, 0);
if (error)
goto out;
- regs->u_regs[UREG_I1] = fd[1];
+ current_pt_regs()->u_regs[UREG_I1] = fd[1];
error = fd[0];
out:
return error;
@@ -93,27 +106,27 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
/* Linux version of mmap */
-asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT - 12));
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
-asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long off)
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, off)
{
/* no alignment check? */
- return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
-long sparc_remap_file_pages(unsigned long start, unsigned long size,
- unsigned long prot, unsigned long pgoff,
- unsigned long flags)
+SYSCALL_DEFINE5(sparc_remap_file_pages, unsigned long, start, unsigned long, size,
+ unsigned long, prot, unsigned long, pgoff,
+ unsigned long, flags)
{
/* This works on an existing mmap so we don't need to validate
* the range as that was done at the original mmap call.
@@ -122,11 +135,10 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size,
(pgoff >> (PAGE_SHIFT - 12)), flags);
}
-/* we come to here via sys_nis_syscall so it can setup the regs argument */
-asmlinkage unsigned long
-c_sys_nis_syscall (struct pt_regs *regs)
+SYSCALL_DEFINE0(nis_syscall)
{
static int count = 0;
+ struct pt_regs *regs = current_pt_regs();
if (count++ > 5)
return -ENOSYS;
@@ -143,17 +155,11 @@ c_sys_nis_syscall (struct pt_regs *regs)
asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
- siginfo_t info;
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)regs->pc;
- info.si_trapno = 0;
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
@@ -197,25 +203,29 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
return ret;
}
-asmlinkage int sys_getdomainname(char __user *name, int len)
+SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
- int nlen, err;
-
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
+
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ up_read(&uts_sem);
-out:
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
+
+out_unlock:
up_read(&uts_sem);
return err;
}
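
The reworked getdomainname (here and in the 64-bit file below) snapshots utsname()->domainname into a stack buffer while holding uts_sem, drops the semaphore, and only then does the copy_to_user, presumably so a faulting user copy never runs under the lock. A compilable userspace analogue of that shape, with a pthread rwlock standing in for uts_sem and memcpy standing in for copy_to_user:

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define UTS_LEN 64

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char domainname[UTS_LEN + 1] = "example.invalid";

static int get_domainname(char *dst, int len)
{
	char tmp[UTS_LEN + 1];
	int nlen;

	if (len < 0)
		return -1;			/* -EINVAL in the kernel */

	pthread_rwlock_rdlock(&uts_lock);
	nlen = (int)strlen(domainname) + 1;
	if (nlen > len) {
		pthread_rwlock_unlock(&uts_lock);
		return -1;
	}
	memcpy(tmp, domainname, nlen);		/* snapshot under the lock */
	pthread_rwlock_unlock(&uts_lock);

	memcpy(dst, tmp, nlen);			/* copy_to_user() happens here */
	return 0;
}

int main(void)
{
	char buf[UTS_LEN + 1];

	if (get_domainname(buf, sizeof(buf)) == 0)
		printf("%s\n", buf);
	return 0;
}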
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 51561b8b15ba..dbf118b40601 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -7,7 +8,9 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
@@ -24,24 +27,25 @@
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
+#include <linux/context_tracking.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
+#include <linux/hugetlb.h>
-#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>
#include "entry.h"
+#include "kernel.h"
#include "systbls.h"
/* #define DEBUG_UNIMP_SYSCALL */
-asmlinkage unsigned long sys_getpagesize(void)
+SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE;
}
-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
-#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
-
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
* overflow past the end of the 64-bit address space?
*/
@@ -84,19 +88,33 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr,
return base + off;
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+static unsigned long get_align_mask(struct file *filp, unsigned long flags)
+{
+ if (filp && is_file_hugepages(filp))
+ return huge_page_mask_align(filp);
+ if (filp || (flags & MAP_SHARED))
+ return PAGE_MASK & (SHMLBA - 1);
+
+ return 0;
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;
+
+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -108,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return -ENOMEM;
do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
+ if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
do_color_align = 1;
if (addr) {
@@ -119,16 +137,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ info.align_mask = get_align_mask(filp, flags);
+ if (!file_hugepage)
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -144,23 +162,27 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long task_size = STACK_TOP32;
unsigned long addr = addr0;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
+ bool file_hugepage = false;
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
+ if (filp && is_file_hugepages(filp))
+ file_hugepage = true;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) &&
+ if (!file_hugepage && (flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
@@ -170,7 +192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return -ENOMEM;
do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
+ if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
do_color_align = 1;
/* requesting a specific address */
@@ -182,7 +204,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -190,8 +212,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
+ info.align_mask = get_align_mask(filp, flags);
+ if (!file_hugepage)
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
@@ -215,14 +238,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
- unsigned long (*get_area)(struct file *, unsigned long,
- unsigned long, unsigned long, unsigned long);
-
- get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_area(NULL, orig_addr, len, pgoff, flags);
+ return mm_get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
@@ -235,7 +254,8 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
align_goal = (64UL * 1024);
do {
- addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = mm_get_unmapped_area(NULL, orig_addr,
+ len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
@@ -253,7 +273,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_area(NULL, orig_addr, len, pgoff, flags);
+ addr = mm_get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
@@ -265,7 +285,7 @@ static unsigned long mmap_rnd(void)
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
+ unsigned long val = get_random_long();
if (test_thread_flag(TIF_32BIT))
rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else
@@ -274,7 +294,7 @@ static unsigned long mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
-void arch_pick_mmap_layout(struct mm_struct *mm)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = mmap_rnd();
unsigned long gap;
@@ -283,13 +303,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
- gap = rlimit(RLIMIT_STACK);
+ gap = rlim_stack->rlim_cur;
if (!test_thread_flag(TIF_32BIT) ||
(current->personality & ADDR_COMPAT_LAYOUT) ||
gap == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
+ mm_flags_clear(MMF_TOPDOWN, mm);
} else {
/* We know it's 32-bit */
unsigned long task_size = STACK_TOP32;
@@ -300,7 +320,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm_flags_set(MMF_TOPDOWN, mm);
}
}
@@ -308,7 +328,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
+SYSCALL_DEFINE0(sparc_pipe)
{
int fd[2];
int error;
@@ -316,7 +336,7 @@ SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
error = do_pipe_flags(fd, 0);
if (error)
goto out;
- regs->u_regs[UREG_I1] = fd[1];
+ current_pt_regs()->u_regs[UREG_I1] = fd[1];
error = fd[0];
out:
return error;
@@ -333,25 +353,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
{
long err;
+ if (!IS_ENABLED(CONFIG_SYSVIPC))
+ return -ENOSYS;
+
/* No need for backward compatibility. We can start fresh... */
- if (call <= SEMCTL) {
+ if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
- err = sys_semtimedop(first, ptr,
- (unsigned)second, NULL);
+ err = ksys_semtimedop(first, ptr,
+ (unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
- err = sys_semtimedop(first, ptr, (unsigned)second,
- (const struct timespec __user *)
- (unsigned long) fifth);
+ err = ksys_semtimedop(first, ptr, (unsigned int)second,
+ (const struct __kernel_timespec __user *)
+ (unsigned long) fifth);
goto out;
case SEMGET:
- err = sys_semget(first, (int)second, (int)third);
+ err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
- err = sys_semctl(first, second,
- (int)third | IPC_64,
- (unsigned long) ptr);
+ err = ksys_old_semctl(first, second,
+ (int)third | IPC_64,
+ (unsigned long) ptr);
goto out;
}
default:
@@ -362,18 +385,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
- err = sys_msgsnd(first, ptr, (size_t)second,
+ err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
- err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+ err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
- err = sys_msgget((key_t)first, (int)second);
+ err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
- err = sys_msgctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
@@ -393,13 +416,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
goto out;
}
case SHMDT:
- err = sys_shmdt(ptr);
+ err = ksys_shmdt(ptr);
goto out;
case SHMGET:
- err = sys_shmget(first, (size_t)second, (int)third);
+ err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
- err = sys_shmctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
@@ -414,7 +437,7 @@ out:
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
- int ret;
+ long ret;
if (personality(current->personality) == PER_LINUX32 &&
personality(personality) == PER_LINUX)
@@ -456,7 +479,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
goto out;
if (off & ~PAGE_MASK)
goto out;
- retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return retval;
}
@@ -478,10 +501,10 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
return sys_mremap(addr, old_len, new_len, flags, new_addr);
}
-/* we come to here via sys_nis_syscall so it can setup the regs argument */
-asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
+SYSCALL_DEFINE0(nis_syscall)
{
static int count;
+ struct pt_regs *regs = current_pt_regs();
/* Don't make the system unusable, if someone goes stuck */
if (count++ > 5)
@@ -499,7 +522,7 @@ asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
@@ -508,42 +531,97 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs)
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc);
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
+ exception_exit(prev_state);
}
-extern void check_pending(int signum);
-
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
- int nlen, err;
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ up_read(&uts_sem);
-out:
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
+
+out_unlock:
up_read(&uts_sem);
return err;
}
+SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
+{
+ struct __kernel_timex txc;
+ struct __kernel_old_timeval *tv = (void *)&txc.time;
+ int ret;
+
+ /* Copy the user data space into the kernel copy
+ * structure. But bear in mind that the structures
+ * may change
+ */
+ if (copy_from_user(&txc, txc_p, sizeof(txc)))
+ return -EFAULT;
+
+ /*
+ * override for sparc64 specific timeval type: tv_usec
+ * is 32 bit wide instead of 64-bit in __kernel_timex
+ */
+ txc.time.tv_usec = tv->tv_usec;
+ ret = do_adjtimex(&txc);
+ tv->tv_usec = txc.time.tv_usec;
+
+ return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
+}
+
+SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
+ struct __kernel_timex __user *, txc_p)
+{
+ struct __kernel_timex txc;
+ struct __kernel_old_timeval *tv = (void *)&txc.time;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
+ pr_err_once("process %d (%s) attempted a POSIX timer syscall "
+ "while CONFIG_POSIX_TIMERS is not set\n",
+ current->pid, current->comm);
+
+ return -ENOSYS;
+ }
+
+ /* Copy the user data space into the kernel copy
+ * structure. But bear in mind that the structures
+ * may change
+ */
+ if (copy_from_user(&txc, txc_p, sizeof(txc)))
+ return -EFAULT;
+
+ /*
+ * override for sparc64 specific timeval type: tv_usec
+ * is 32 bit wide instead of 64-bit in __kernel_timex
+ */
+ txc.time.tv_usec = tv->tv_usec;
+ ret = do_clock_adjtime(which_clock, &txc);
+ tv->tv_usec = txc.time.tv_usec;
+
+ return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
+}
+
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
utrap_handler_t, new_p, utrap_handler_t, new_d,
utrap_handler_t __user *, old_p,
@@ -569,7 +647,8 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
- kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+ kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
+ GFP_KERNEL);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
@@ -579,8 +658,9 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
- kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
- GFP_KERNEL);
+ kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
+ sizeof(long),
+ GFP_KERNEL);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;
@@ -604,9 +684,9 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
return 0;
}
-asmlinkage long sparc_memory_ordering(unsigned long model,
- struct pt_regs *regs)
+SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
+ struct pt_regs *regs = current_pt_regs();
if (model >= 3)
return -EINVAL;
regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
@@ -640,7 +720,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
return ret;
}
-asmlinkage long sys_kern_features(void)
+SYSCALL_DEFINE0(kern_features)
{
return KERN_FEATURE_MIXED_MODE_STACK;
}
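
The two 64-bit arch_get_unmapped_area paths above now share a small get_align_mask() helper: huge-page files take the huge-page alignment, other file-backed or MAP_SHARED mappings take D-cache colour alignment, and everything else needs none. A compilable sketch of that decision tree; PAGE_SHIFT, SHMLBA and the huge_mask parameter below are demo stand-ins, not the kernel's values:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 13				/* assumed for the demo */
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
#define SHMLBA     (1UL << 20)			/* assumed for the demo */
#define MAP_SHARED 0x01

static unsigned long get_align_mask(bool is_file, bool is_hugepage,
				    unsigned long huge_mask, unsigned long flags)
{
	if (is_file && is_hugepage)
		return huge_mask;		/* ~ huge_page_mask_align(filp) */
	if (is_file || (flags & MAP_SHARED))
		return PAGE_MASK & (SHMLBA - 1);	/* cache colouring */
	return 0;
}

int main(void)
{
	printf("shared anon  : %#lx\n", get_align_mask(false, false, 0, MAP_SHARED));
	printf("private anon : %#lx\n", get_align_mask(false, false, 0, 0));
	return 0;
}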
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 22a1098961f5..0e8ab0602c36 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
*/
@@ -6,6 +7,11 @@ sys64_execve:
jmpl %g1, %g0
flushw
+sys64_execveat:
+ set sys_execveat, %g1
+ jmpl %g1, %g0
+ flushw
+
#ifdef CONFIG_COMPAT
sunos_execv:
mov %g0, %o2
@@ -13,18 +19,14 @@ sys32_execve:
set compat_sys_execve, %g1
jmpl %g1, %g0
flushw
+
+sys32_execveat:
+ set compat_sys_execveat, %g1
+ jmpl %g1, %g0
+ flushw
#endif
.align 32
-sys_sparc_pipe:
- ba,pt %xcc, sys_sparc_pipe_real
- add %sp, PTREGS_OFF, %o0
-sys_nis_syscall:
- ba,pt %xcc, c_sys_nis_syscall
- add %sp, PTREGS_OFF, %o0
-sys_memory_ordering:
- ba,pt %xcc, sparc_memory_ordering
- add %sp, PTREGS_OFF, %o1
#ifdef CONFIG_COMPAT
sys32_sigstack:
ba,pt %xcc, do_sys32_sigstack
@@ -52,7 +54,7 @@ sys32_rt_sigreturn:
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
@@ -84,22 +86,25 @@ sys32_rt_sigreturn:
* during system calls...
*/
.align 32
-sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
- sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
- or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
- ba,pt %xcc, sys_clone
+sys_vfork:
+ flushw
+ ba,pt %xcc, sparc_vfork
+ add %sp, PTREGS_OFF, %o0
+
+ .align 32
sys_fork:
- clr %o1
- mov SIGCHLD, %o0
+ flushw
+ ba,pt %xcc, sparc_fork
+ add %sp, PTREGS_OFF, %o0
+
+ .align 32
sys_clone:
flushw
- movrz %o1, %fp, %o1
- mov 0, %o3
- ba,pt %xcc, sparc_do_fork
- add %sp, PTREGS_OFF, %o2
+ ba,pt %xcc, sparc_clone
+ add %sp, PTREGS_OFF, %o0
- .globl ret_from_syscall
-ret_from_syscall:
+ .globl ret_from_fork
+ret_from_fork:
/* Clear current_thread_info()->new_child. */
stb %g0, [%g6 + TI_NEW_CHILD]
call schedule_tail
@@ -148,11 +153,29 @@ linux_syscall_trace32:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+ sethi %hi(sys_call_table32), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
+ or %l7, %lo(sys_call_table32), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
+ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
+ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
+ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
+ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+ cmp %g1, NR_syscalls
+ bgeu,pn %xcc, 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
srl %i0, 0, %o0
+ lduw [%l7 + %l4], %l7
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
- ba,pt %xcc, 2f
+ ba,pt %xcc, 5f
srl %i3, 0, %o3
linux_syscall_trace:
@@ -160,7 +183,25 @@ linux_syscall_trace:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+ sethi %hi(sys_call_table64), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
+ or %l7, %lo(sys_call_table64), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
+ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
+ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
+ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
+ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+ cmp %g1, NR_syscalls
+ bgeu,pn %xcc, 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
mov %i0, %o0
+ lduw [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
@@ -182,14 +223,15 @@ linux_sparc_syscall32:
srl %i1, 0, %o1 ! IEU0 Group
ldx [%g6 + TI_FLAGS], %l0 ! Load
- srl %i5, 0, %o5 ! IEU1
+ srl %i3, 0, %o3 ! IEU0
srl %i2, 0, %o2 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
- call %l7 ! CTI Group brk forced
- srl %i3, 0, %o3 ! IEU0
- ba,a,pt %xcc, 3f
+5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -207,7 +249,7 @@ linux_sparc_syscall:
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
@@ -217,13 +259,12 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
2:
diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..8440c16dfb22
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+$(shell mkdir -p $(uapi) $(kapi))
+
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
+
+$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table_%.h: $(syscall) $(systbl) FORCE
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h unistd_64.h
+kapisyshdr-y += syscall_table_32.h \
+ syscall_table_64.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
+
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+ @:
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..39aa26b6a50b
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -0,0 +1,518 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sparc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, 64, or 32 for this file.
+#
+0 common restart_syscall sys_restart_syscall
+1 32 exit sys_exit sparc_exit
+1 64 exit sparc_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common wait4 sys_wait4 compat_sys_wait4
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 32 execv sunos_execv
+11 64 execv sys_nis_syscall
+12 common chdir sys_chdir
+13 32 chown sys_chown16
+13 64 chown sys_chown
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 32 lchown sys_lchown16
+16 64 lchown sys_lchown
+17 common brk sys_brk
+18 common perfctr sys_nis_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common capget sys_capget
+22 common capset sys_capset
+23 32 setuid sys_setuid16
+23 64 setuid sys_setuid
+24 32 getuid sys_getuid16
+24 64 getuid sys_getuid
+25 common vmsplice sys_vmsplice
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+29 32 pause sys_pause
+29 64 pause sys_nis_syscall
+30 32 utime sys_utime32
+30 64 utime sys_utime
+31 32 lchown32 sys_lchown
+32 32 fchown32 sys_fchown
+33 common access sys_access
+34 common nice sys_nice
+35 32 chown32 sys_chown
+36 common sync sys_sync
+37 common kill sys_kill
+38 common stat sys_newstat compat_sys_newstat
+39 32 sendfile sys_sendfile compat_sys_sendfile
+39 64 sendfile sys_sendfile64
+40 common lstat sys_newlstat compat_sys_newlstat
+41 common dup sys_dup
+42 common pipe sys_sparc_pipe
+43 common times sys_times compat_sys_times
+44 32 getuid32 sys_getuid
+45 common umount2 sys_umount
+46 32 setgid sys_setgid16
+46 64 setgid sys_setgid
+47 32 getgid sys_getgid16
+47 64 getgid sys_getgid
+48 common signal sys_signal
+49 32 geteuid sys_geteuid16
+49 64 geteuid sys_geteuid
+50 32 getegid sys_getegid16
+50 64 getegid sys_getegid
+51 common acct sys_acct
+52 64 memory_ordering sys_memory_ordering
+53 32 getgid32 sys_getgid
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common reboot sys_reboot
+56 32 mmap2 sys_mmap2 sys32_mmap2
+57 common symlink sys_symlink
+58 common readlink sys_readlink
+59 32 execve sys_execve sys32_execve
+59 64 execve sys64_execve
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common fstat sys_newfstat compat_sys_newfstat
+63 common fstat64 sys_fstat64 compat_sys_fstat64
+64 common getpagesize sys_getpagesize
+65 common msync sys_msync
+66 common vfork sys_vfork
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 32 geteuid32 sys_geteuid
+70 32 getegid32 sys_getegid
+71 common mmap sys_mmap
+72 32 setreuid32 sys_setreuid
+73 32 munmap sys_munmap
+73 64 munmap sys_64_munmap
+74 common mprotect sys_mprotect
+75 common madvise sys_madvise
+76 common vhangup sys_vhangup
+77 32 truncate64 sys_truncate64 compat_sys_truncate64
+78 common mincore sys_mincore
+79 32 getgroups sys_getgroups16
+79 64 getgroups sys_getgroups
+80 32 setgroups sys_setgroups16
+80 64 setgroups sys_setgroups
+81 common getpgrp sys_getpgrp
+82 32 setgroups32 sys_setgroups
+83 common setitimer sys_setitimer compat_sys_setitimer
+84 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+85 common swapon sys_swapon
+86 common getitimer sys_getitimer compat_sys_getitimer
+87 32 setuid32 sys_setuid
+88 common sethostname sys_sethostname
+89 32 setgid32 sys_setgid
+90 common dup2 sys_dup2
+91 32 setfsuid32 sys_setfsuid
+92 common fcntl sys_fcntl compat_sys_fcntl
+93 common select sys_select compat_sys_select
+94 32 setfsgid32 sys_setfsgid
+95 common fsync sys_fsync
+96 common setpriority sys_setpriority
+97 common socket sys_socket
+98 common connect sys_connect
+99 common accept sys_accept
+100 common getpriority sys_getpriority
+101 common rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+105 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+105 64 rt_sigtimedwait sys_rt_sigtimedwait
+106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+108 32 setresuid32 sys_setresuid
+108 64 setresuid sys_setresuid
+109 32 getresuid32 sys_getresuid
+109 64 getresuid sys_getresuid
+110 32 setresgid32 sys_setresgid
+110 64 setresgid sys_setresgid
+111 32 getresgid32 sys_getresgid
+111 64 getresgid sys_getresgid
+112 32 setregid32 sys_setregid
+113 common recvmsg sys_recvmsg compat_sys_recvmsg
+114 common sendmsg sys_sendmsg compat_sys_sendmsg
+115 32 getgroups32 sys_getgroups
+116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+117 common getrusage sys_getrusage compat_sys_getrusage
+118 common getsockopt sys_getsockopt sys_getsockopt
+119 common getcwd sys_getcwd
+120 common readv sys_readv
+121 common writev sys_writev
+122 common settimeofday sys_settimeofday compat_sys_settimeofday
+123 32 fchown sys_fchown16
+123 64 fchown sys_fchown
+124 common fchmod sys_fchmod
+125 common recvfrom sys_recvfrom compat_sys_recvfrom
+126 32 setreuid sys_setreuid16
+126 64 setreuid sys_setreuid
+127 32 setregid sys_setregid16
+127 64 setregid sys_setregid
+128 common rename sys_rename
+129 common truncate sys_truncate compat_sys_truncate
+130 common ftruncate sys_ftruncate compat_sys_ftruncate
+131 common flock sys_flock
+132 common lstat64 sys_lstat64 compat_sys_lstat64
+133 common sendto sys_sendto
+134 common shutdown sys_shutdown
+135 common socketpair sys_socketpair
+136 common mkdir sys_mkdir
+137 common rmdir sys_rmdir
+138 32 utimes sys_utimes_time32
+138 64 utimes sys_utimes
+139 common stat64 sys_stat64 compat_sys_stat64
+140 common sendfile64 sys_sendfile64
+141 common getpeername sys_getpeername
+142 32 futex sys_futex_time32
+142 64 futex sys_futex
+143 common gettid sys_gettid
+144 common getrlimit sys_getrlimit compat_sys_getrlimit
+145 common setrlimit sys_setrlimit compat_sys_setrlimit
+146 common pivot_root sys_pivot_root
+147 common prctl sys_prctl
+148 common pciconfig_read sys_pciconfig_read
+149 common pciconfig_write sys_pciconfig_write
+150 common getsockname sys_getsockname
+151 common inotify_init sys_inotify_init
+152 common inotify_add_watch sys_inotify_add_watch
+153 common poll sys_poll
+154 common getdents64 sys_getdents64
+155 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+156 common inotify_rm_watch sys_inotify_rm_watch
+157 common statfs sys_statfs compat_sys_statfs
+158 common fstatfs sys_fstatfs compat_sys_fstatfs
+159 common umount sys_oldumount
+160 common sched_set_affinity sys_sched_setaffinity compat_sys_sched_setaffinity
+161 common sched_get_affinity sys_sched_getaffinity compat_sys_sched_getaffinity
+162 common getdomainname sys_getdomainname
+163 common setdomainname sys_setdomainname
+164 64 utrap_install sys_utrap_install
+165 common quotactl sys_quotactl
+166 common set_tid_address sys_set_tid_address
+167 common mount sys_mount
+168 common ustat sys_ustat compat_sys_ustat
+169 common setxattr sys_setxattr
+170 common lsetxattr sys_lsetxattr
+171 common fsetxattr sys_fsetxattr
+172 common getxattr sys_getxattr
+173 common lgetxattr sys_lgetxattr
+174 common getdents sys_getdents compat_sys_getdents
+175 common setsid sys_setsid
+176 common fchdir sys_fchdir
+177 common fgetxattr sys_fgetxattr
+178 common listxattr sys_listxattr
+179 common llistxattr sys_llistxattr
+180 common flistxattr sys_flistxattr
+181 common removexattr sys_removexattr
+182 common lremovexattr sys_lremovexattr
+183 32 sigpending sys_sigpending compat_sys_sigpending
+183 64 sigpending sys_nis_syscall
+184 common query_module sys_ni_syscall
+185 common setpgid sys_setpgid
+186 common fremovexattr sys_fremovexattr
+187 common tkill sys_tkill
+188 32 exit_group sys_exit_group sparc_exit_group
+188 64 exit_group sparc_exit_group
+189 common uname sys_newuname
+190 common init_module sys_init_module
+191 32 personality sys_personality sys_sparc64_personality
+191 64 personality sys_sparc64_personality
+192 32 remap_file_pages sys_sparc_remap_file_pages sys_remap_file_pages
+192 64 remap_file_pages sys_remap_file_pages
+193 common epoll_create sys_epoll_create
+194 common epoll_ctl sys_epoll_ctl
+195 common epoll_wait sys_epoll_wait
+196 common ioprio_set sys_ioprio_set
+197 common getppid sys_getppid
+198 32 sigaction sys_sparc_sigaction compat_sys_sparc_sigaction
+198 64 sigaction sys_nis_syscall
+199 common sgetmask sys_sgetmask
+200 common ssetmask sys_ssetmask
+201 32 sigsuspend sys_sigsuspend
+201 64 sigsuspend sys_nis_syscall
+202 common oldlstat sys_newlstat compat_sys_newlstat
+203 common uselib sys_uselib
+204 32 readdir sys_old_readdir compat_sys_old_readdir
+204 64 readdir sys_nis_syscall
+205 common readahead sys_readahead compat_sys_readahead
+206 common socketcall sys_socketcall compat_sys_socketcall
+207 common syslog sys_syslog
+208 common lookup_dcookie sys_ni_syscall
+209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
+210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+211 common tgkill sys_tgkill
+212 common waitpid sys_waitpid
+213 common swapoff sys_swapoff
+214 common sysinfo sys_sysinfo compat_sys_sysinfo
+215 32 ipc sys_ipc compat_sys_ipc
+215 64 ipc sys_sparc_ipc
+216 32 sigreturn sys_sigreturn sys32_sigreturn
+216 64 sigreturn sys_nis_syscall
+217 common clone sys_clone
+218 common ioprio_get sys_ioprio_get
+219 32 adjtimex sys_adjtimex_time32
+219 64 adjtimex sys_sparc_adjtimex
+220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+220 64 sigprocmask sys_nis_syscall
+221 common create_module sys_ni_syscall
+222 common delete_module sys_delete_module
+223 common get_kernel_syms sys_ni_syscall
+224 common getpgid sys_getpgid
+225 common bdflush sys_ni_syscall
+226 common sysfs sys_sysfs
+227 common afs_syscall sys_nis_syscall
+228 common setfsuid sys_setfsuid16
+229 common setfsgid sys_setfsgid16
+230 common _newselect sys_select compat_sys_select
+231 32 time sys_time32
+232 common splice sys_splice
+233 32 stime sys_stime32
+233 64 stime sys_stime
+234 common statfs64 sys_statfs64 compat_sys_statfs64
+235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+236 common _llseek sys_llseek
+237 common mlock sys_mlock
+238 common munlock sys_munlock
+239 common mlockall sys_mlockall
+240 common munlockall sys_munlockall
+241 common sched_setparam sys_sched_setparam
+242 common sched_getparam sys_sched_getparam
+243 common sched_setscheduler sys_sched_setscheduler
+244 common sched_getscheduler sys_sched_getscheduler
+245 common sched_yield sys_sched_yield
+246 common sched_get_priority_max sys_sched_get_priority_max
+247 common sched_get_priority_min sys_sched_get_priority_min
+248 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+248 64 sched_rr_get_interval sys_sched_rr_get_interval
+249 32 nanosleep sys_nanosleep_time32
+249 64 nanosleep sys_nanosleep
+250 32 mremap sys_mremap
+250 64 mremap sys_64_mremap
+251 common _sysctl sys_ni_syscall
+252 common getsid sys_getsid
+253 common fdatasync sys_fdatasync
+254 32 nfsservctl sys_ni_syscall sys_nis_syscall
+254 64 nfsservctl sys_nis_syscall
+255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+256 32 clock_settime sys_clock_settime32
+256 64 clock_settime sys_clock_settime
+257 32 clock_gettime sys_clock_gettime32
+257 64 clock_gettime sys_clock_gettime
+258 32 clock_getres sys_clock_getres_time32
+258 64 clock_getres sys_clock_getres
+259 32 clock_nanosleep sys_clock_nanosleep_time32
+259 64 clock_nanosleep sys_clock_nanosleep
+260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+262 32 timer_settime sys_timer_settime32
+262 64 timer_settime sys_timer_settime
+263 32 timer_gettime sys_timer_gettime32
+263 64 timer_gettime sys_timer_gettime
+264 common timer_getoverrun sys_timer_getoverrun
+265 common timer_delete sys_timer_delete
+266 common timer_create sys_timer_create compat_sys_timer_create
+# 267 was vserver
+267 common vserver sys_nis_syscall
+268 common io_setup sys_io_setup compat_sys_io_setup
+269 common io_destroy sys_io_destroy
+270 common io_submit sys_io_submit compat_sys_io_submit
+271 common io_cancel sys_io_cancel
+272 32 io_getevents sys_io_getevents_time32
+272 64 io_getevents sys_io_getevents
+273 common mq_open sys_mq_open compat_sys_mq_open
+274 common mq_unlink sys_mq_unlink
+275 32 mq_timedsend sys_mq_timedsend_time32
+275 64 mq_timedsend sys_mq_timedsend
+276 32 mq_timedreceive sys_mq_timedreceive_time32
+276 64 mq_timedreceive sys_mq_timedreceive
+277 common mq_notify sys_mq_notify compat_sys_mq_notify
+278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+279 common waitid sys_waitid compat_sys_waitid
+280 common tee sys_tee
+281 common add_key sys_add_key
+282 common request_key sys_request_key
+283 common keyctl sys_keyctl compat_sys_keyctl
+284 common openat sys_openat compat_sys_openat
+285 common mkdirat sys_mkdirat
+286 common mknodat sys_mknodat
+287 common fchownat sys_fchownat
+288 32 futimesat sys_futimesat_time32
+288 64 futimesat sys_futimesat
+289 common fstatat64 sys_fstatat64 compat_sys_fstatat64
+290 common unlinkat sys_unlinkat
+291 common renameat sys_renameat
+292 common linkat sys_linkat
+293 common symlinkat sys_symlinkat
+294 common readlinkat sys_readlinkat
+295 common fchmodat sys_fchmodat
+296 common faccessat sys_faccessat
+297 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+297 64 pselect6 sys_pselect6
+298 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+298 64 ppoll sys_ppoll
+299 common unshare sys_unshare
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+302 common migrate_pages sys_migrate_pages
+303 common mbind sys_mbind
+304 common get_mempolicy sys_get_mempolicy
+305 common set_mempolicy sys_set_mempolicy
+306 common kexec_load sys_kexec_load compat_sys_kexec_load
+307 common move_pages sys_move_pages
+308 common getcpu sys_getcpu
+309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+310 32 utimensat sys_utimensat_time32
+310 64 utimensat sys_utimensat
+311 common signalfd sys_signalfd compat_sys_signalfd
+312 common timerfd_create sys_timerfd_create
+313 common eventfd sys_eventfd
+314 common fallocate sys_fallocate compat_sys_fallocate
+315 32 timerfd_settime sys_timerfd_settime32
+315 64 timerfd_settime sys_timerfd_settime
+316 32 timerfd_gettime sys_timerfd_gettime32
+316 64 timerfd_gettime sys_timerfd_gettime
+317 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+318 common eventfd2 sys_eventfd2
+319 common epoll_create1 sys_epoll_create1
+320 common dup3 sys_dup3
+321 common pipe2 sys_pipe2
+322 common inotify_init1 sys_inotify_init1
+323 common accept4 sys_accept4
+324 common preadv sys_preadv compat_sys_preadv
+325 common pwritev sys_pwritev compat_sys_pwritev
+326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+327 common perf_event_open sys_perf_event_open
+328 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+328 64 recvmmsg sys_recvmmsg
+329 common fanotify_init sys_fanotify_init
+330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+331 common prlimit64 sys_prlimit64
+332 common name_to_handle_at sys_name_to_handle_at
+333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+334 32 clock_adjtime sys_clock_adjtime32
+334 64 clock_adjtime sys_sparc_clock_adjtime
+335 common syncfs sys_syncfs
+336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+337 common setns sys_setns
+338 common process_vm_readv sys_process_vm_readv
+339 common process_vm_writev sys_process_vm_writev
+340 32 kern_features sys_ni_syscall sys_kern_features
+340 64 kern_features sys_kern_features
+341 common kcmp sys_kcmp
+342 common finit_module sys_finit_module
+343 common sched_setattr sys_sched_setattr
+344 common sched_getattr sys_sched_getattr
+345 common renameat2 sys_renameat2
+346 common seccomp sys_seccomp
+347 common getrandom sys_getrandom
+348 common memfd_create sys_memfd_create
+349 common bpf sys_bpf
+350 32 execveat sys_execveat sys32_execveat
+350 64 execveat sys64_execveat
+351 common membarrier sys_membarrier
+352 common userfaultfd sys_userfaultfd
+353 common bind sys_bind
+354 common listen sys_listen
+355 common setsockopt sys_setsockopt sys_setsockopt
+356 common mlock2 sys_mlock2
+357 common copy_file_range sys_copy_file_range
+358 common preadv2 sys_preadv2 compat_sys_preadv2
+359 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+360 common statx sys_statx
+361 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+361 64 io_pgetevents sys_io_pgetevents
+362 common pkey_mprotect sys_pkey_mprotect
+363 common pkey_alloc sys_pkey_alloc
+364 common pkey_free sys_pkey_free
+365 common rseq sys_rseq
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+# 435 reserved for clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
+470 common listns sys_listns
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 654e8aad3bbe..f19487e4cc71 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -1,4 +1,5 @@
-/* sysfs.c: Toplogy sysfs support code for sparc64.
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs.c: Topology sysfs support code for sparc64.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
@@ -98,27 +99,7 @@ static struct attribute_group mmu_stat_group = {
.name = "mmu_stats",
};
-/* XXX convert to rusty's on_one_cpu */
-static unsigned long run_on_cpu(unsigned long cpu,
- unsigned long (*func)(unsigned long),
- unsigned long arg)
-{
- cpumask_t old_affinity;
- unsigned long ret;
-
- cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
- /* should return -EINVAL to userspace */
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
- return 0;
-
- ret = func(arg);
-
- set_cpus_allowed_ptr(current, &old_affinity);
-
- return ret;
-}
-
-static unsigned long read_mmustat_enable(unsigned long junk)
+static long read_mmustat_enable(void *data __maybe_unused)
{
unsigned long ra = 0;
@@ -127,11 +108,11 @@ static unsigned long read_mmustat_enable(unsigned long junk)
return ra != 0;
}
-static unsigned long write_mmustat_enable(unsigned long val)
+static long write_mmustat_enable(void *data)
{
- unsigned long ra, orig_ra;
+ unsigned long ra, orig_ra, *val = data;
- if (val)
+ if (*val)
ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
else
ra = 0UL;
@@ -142,7 +123,8 @@ static unsigned long write_mmustat_enable(unsigned long val)
static ssize_t show_mmustat_enable(struct device *s,
struct device_attribute *attr, char *buf)
{
- unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
+ long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
+
return sprintf(buf, "%lx\n", val);
}
@@ -150,13 +132,15 @@ static ssize_t store_mmustat_enable(struct device *s,
struct device_attribute *attr, const char *buf,
size_t count)
{
- unsigned long val, err;
- int ret = sscanf(buf, "%ld", &val);
+ unsigned long val;
+ long err;
+ int ret;
+ ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return -EINVAL;
- err = run_on_cpu(s->id, write_mmustat_enable, val);
+ err = work_on_cpu(s->id, write_mmustat_enable, &val);
if (err)
return -EIO;
@@ -221,7 +205,7 @@ static struct device_attribute cpu_core_attrs[] = {
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-static void register_cpu_online(unsigned int cpu)
+static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@@ -231,11 +215,12 @@ static void register_cpu_online(unsigned int cpu)
device_create_file(s, &cpu_core_attrs[i]);
register_mmu_stats(s);
+ return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_cpu_online(unsigned int cpu)
+static int unregister_cpu_online(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
int i;
@@ -243,33 +228,10 @@ static void unregister_cpu_online(unsigned int cpu)
unregister_mmu_stats(s);
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
device_remove_file(s, &cpu_core_attrs[i]);
-}
-#endif
-
-static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned int)(long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- register_cpu_online(cpu);
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- unregister_cpu_online(cpu);
- break;
#endif
- }
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
-};
-
static void __init check_mmu_stats(void)
{
unsigned long dummy1, err;
@@ -282,34 +244,21 @@ static void __init check_mmu_stats(void)
mmu_stats_supported = 1;
}
-static void register_nodes(void)
-{
-#ifdef CONFIG_NUMA
- int i;
-
- for (i = 0; i < MAX_NUMNODES; i++)
- register_one_node(i);
-#endif
-}
-
static int __init topology_init(void)
{
- int cpu;
-
- register_nodes();
+ int cpu, ret;
check_mmu_stats();
- register_cpu_notifier(&sysfs_cpu_nb);
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
- if (cpu_online(cpu))
- register_cpu_online(cpu);
}
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online",
+ register_cpu_online, unregister_cpu_online);
+ WARN_ON(ret < 0);
return 0;
}
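
The sysfs.c hunks above replace the open-coded run_on_cpu() affinity dance and the old __cpuinit notifier with work_on_cpu() and a dynamic CPU hotplug state. A minimal sketch of the pattern being adopted follows (illustrative only, not part of the patch; the example_* names are hypothetical):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* cpuhp callbacks take the CPU number and return 0 or a negative errno,
 * which is why register_cpu_online()/unregister_cpu_online() now return int.
 */
static int example_cpu_online(unsigned int cpu)
{
	/* create per-CPU sysfs files, enable per-CPU counters, ... */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* undo whatever example_cpu_online() did */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a free dynamic state; the online
	 * callback also runs for CPUs that are already up at setup time.
	 * On success the return value is the state number (>= 0), not 0.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	return ret < 0 ? ret : 0;
}
device_initcall(example_init);
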
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index 26e6dd72e92a..bf014267d619 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -1,41 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SYSTBLS_H
#define _SYSTBLS_H
+#include <linux/signal.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/types.h>
-#include <linux/signal.h>
+
#include <asm/utrap.h>
-extern asmlinkage unsigned long sys_getpagesize(void);
-extern asmlinkage long sparc_pipe(struct pt_regs *regs);
-extern asmlinkage long sys_sparc_ipc(unsigned int call, int first,
- unsigned long second,
- unsigned long third,
- void __user *ptr, long fifth);
-extern asmlinkage long sparc64_personality(unsigned long personality);
-extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
-extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
- unsigned long old_len,
- unsigned long new_len,
- unsigned long flags,
- unsigned long new_addr);
-extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
-extern asmlinkage long sys_getdomainname(char __user *name, int len);
-extern asmlinkage long sys_utrap_install(utrap_entry_t type,
- utrap_handler_t new_p,
- utrap_handler_t new_d,
- utrap_handler_t __user *old_p,
- utrap_handler_t __user *old_d);
-extern asmlinkage long sparc_memory_ordering(unsigned long model,
- struct pt_regs *regs);
-extern asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- void __user *restorer,
- size_t sigsetsize);
+asmlinkage long sys_getpagesize(void);
+asmlinkage long sys_sparc_pipe(void);
+asmlinkage long sys_nis_syscall(void);
+asmlinkage long sys_getdomainname(char __user *name, int len);
+void do_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long off);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+
+#ifdef CONFIG_SPARC32
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long sys_sparc_remap_file_pages(unsigned long start, unsigned long size,
+ unsigned long prot, unsigned long pgoff,
+ unsigned long flags);
-extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
-extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
-extern void do_rt_sigreturn(struct pt_regs *regs);
+#endif /* CONFIG_SPARC32 */
+#ifdef CONFIG_SPARC64
+asmlinkage long sys_sparc_ipc(unsigned int call, int first,
+ unsigned long second,
+ unsigned long third,
+ void __user *ptr, long fifth);
+asmlinkage long sparc64_personality(unsigned long personality);
+asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+asmlinkage unsigned long sys64_mremap(unsigned long addr,
+ unsigned long old_len,
+ unsigned long new_len,
+ unsigned long flags,
+ unsigned long new_addr);
+asmlinkage long sys_utrap_install(utrap_entry_t type,
+ utrap_handler_t new_p,
+ utrap_handler_t new_d,
+ utrap_handler_t __user *old_p,
+ utrap_handler_t __user *old_d);
+asmlinkage long sys_memory_ordering(unsigned long model);
+asmlinkage void sparc64_set_context(struct pt_regs *regs);
+asmlinkage void sparc64_get_context(struct pt_regs *regs);
+asmlinkage long compat_sys_truncate64(const char __user * path,
+ u32 high,
+ u32 low);
+asmlinkage long compat_sys_ftruncate64(unsigned int fd,
+ u32 high,
+ u32 low);
+struct compat_stat64;
+asmlinkage long compat_sys_stat64(const char __user * filename,
+ struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_lstat64(const char __user * filename,
+ struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_fstat64(unsigned int fd,
+ struct compat_stat64 __user * statbuf);
+asmlinkage long compat_sys_fstatat64(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat64 __user * statbuf, int flag);
+asmlinkage long compat_sys_pread64(unsigned int fd,
+ char __user *ubuf,
+ compat_size_t count,
+ u32 poshi,
+ u32 poslo);
+asmlinkage long compat_sys_pwrite64(unsigned int fd,
+ char __user *ubuf,
+ compat_size_t count,
+ u32 poshi,
+ u32 poslo);
+asmlinkage long compat_sys_readahead(int fd,
+ unsigned offhi,
+ unsigned offlo,
+ compat_size_t count);
+long compat_sys_fadvise64(int fd,
+ unsigned offhi,
+ unsigned offlo,
+ compat_size_t len, int advice);
+long compat_sys_fadvise64_64(int fd,
+ unsigned offhi, unsigned offlo,
+ unsigned lenhi, unsigned lenlo,
+ int advice);
+long compat_sys_sync_file_range(unsigned int fd,
+ unsigned off_high, unsigned off_low,
+ unsigned nb_high, unsigned nb_low,
+ unsigned int flags);
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ u32 lenhi, u32 lenlo);
+asmlinkage long compat_sys_fstat64(unsigned int fd,
+ struct compat_stat64 __user * statbuf);
+asmlinkage long compat_sys_fstatat64(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat64 __user * statbuf,
+ int flag);
+#endif /* CONFIG_SPARC64 */
#endif /* _SYSTBLS_H */
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 7b87171ecf1e..3aaffa017706 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -8,81 +9,10 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) .long entry
.data
.align 4
-
- /* First, the Linux native syscall table. */
-
.globl sys_call_table
sys_call_table:
-/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
-/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
-/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
-/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
-/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
-/*35*/ .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
-/*40*/ .long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid
-/*45*/ .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
-/*50*/ .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
-/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
-/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
-/*65*/ .long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid
-/*70*/ .long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
-/*75*/ .long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16
-/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
-/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
-/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
-/*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
-/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
-/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd
-/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
-/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
-/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
-/*140*/ .long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit
-/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .long sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
-/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
-/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
-/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
-/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
-/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall
-/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
-/*190*/ .long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
-/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_sparc_sigaction, sys_sgetmask
-/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir
-/*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
-/*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
-/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
-/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
-/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
-/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64
- /* "We are the Knights of the Forest of Ni!!" */
-/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
-/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
-/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
-/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
-/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
-/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
-/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
-/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
-/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
-/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
-/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
-/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module
+#include <asm/syscall_table_32.h> /* 32-bit native syscalls */
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 6d81597064b6..398fe449dd34 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -9,159 +10,20 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-
+#define __SYSCALL(nr, entry) .word entry
.text
.align 4
-
#ifdef CONFIG_COMPAT
- /* First, the 32-bit Linux native syscall table. */
-
.globl sys_call_table32
sys_call_table32:
-/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
-/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
-/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
-/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
-/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
- .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile
-/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
- .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
-/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
- .word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve
-/*60*/ .word sys_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize
- .word sys_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid
-/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
- .word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys_getgroups16
-/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, sys32_ftruncate64
- .word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
-/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
- .word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
- .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
- .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd
-/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
- .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
-/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
- .word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
-/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
- .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
- .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
-/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
- .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
-/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
- .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
- .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
-/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
- .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask
-/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
- .word sys32_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, sys32_fadvise64
-/*210*/ .word sys32_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo
- .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex
-/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
- .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
-/*230*/ .word sys32_select, compat_sys_time, sys_splice, compat_sys_stime, compat_sys_statfs64
- .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
- .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, compat_sys_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
- .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
-/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
-/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
- .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
-/*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
- .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
-/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
- .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
-/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
- .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
-/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
- .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
- .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
- .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
-
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#include <asm/syscall_table_32.h> /* Compat syscalls */
+#undef __SYSCALL_WITH_COMPAT
#endif /* CONFIG_COMPAT */
- /* Now the 64-bit native Linux syscall table. */
-
.align 4
.globl sys_call_table64, sys_call_table
sys_call_table64:
sys_call_table:
-/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
-/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek
-/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
-/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
-/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
- .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
-/*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall
- .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
-/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
- .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve
-/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
- .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
-/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect
- .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
-/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
- .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
-/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
- .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
-/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
- .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
- .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
-/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
- .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
- .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
-/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
- .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
- .word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
-/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
- .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
-/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
- .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
- .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
-/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
- .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
-/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
- .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
-/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
- .word sys_sparc_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex
-/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
- .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
-/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64
- .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
- .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
- .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
-/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
-/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
- .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
- .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
-/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
- .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
-/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
- .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
- .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
- .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
- .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#include <asm/syscall_table_64.h> /* 64-bit native syscalls */
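
Here the hand-numbered .word/.long tables are dropped in favour of headers generated from the syscall.tbl file added earlier in this series; the assembler files pick which column to emit by defining __SYSCALL()/__SYSCALL_WITH_COMPAT() before the #include. Roughly (an illustrative expansion, not a quote of the generated file), table row "5 common open sys_open compat_sys_open" is carried as:

/* excerpt of what the generated asm/syscall_table_64.h (and _32.h) would carry */
__SYSCALL_WITH_COMPAT(5, sys_open, compat_sys_open)

/* With the definitions in systbls_64.S:
 *   #define __SYSCALL(nr, entry)                      .word entry
 *   #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
 * the native sys_call_table gets:        .word sys_open
 * while sys_call_table32 redefines the macro to take the compat column:
 *   #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
 * so the same row becomes:               .word compat_sys_open
 * (systbls_32.S emits .long instead of .word for the 32-bit kernel.)
 */
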
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
deleted file mode 100644
index 9aba8bd5a78b..000000000000
--- a/arch/sparc/kernel/tadpole.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/asi.h>
-#include <asm/oplib.h>
-#include <asm/io.h>
-
-#define MACIO_SCSI_CSR_ADDR 0x78400000
-#define MACIO_EN_DMA 0x00000200
-#define CLOCK_INIT_DONE 1
-
-static int clk_state;
-static volatile unsigned char *clk_ctrl;
-void (*cpu_pwr_save)(void);
-
-static inline unsigned int ldphys(unsigned int addr)
-{
- unsigned long data;
-
- __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
- "=r" (data) :
- "r" (addr), "i" (ASI_M_BYPASS));
- return data;
-}
-
-static void clk_init(void)
-{
- __asm__ __volatile__("mov 0x6c, %%g1\n\t"
- "mov 0x4c, %%g2\n\t"
- "mov 0xdf, %%g3\n\t"
- "stb %%g1, [%0+3]\n\t"
- "stb %%g2, [%0+3]\n\t"
- "stb %%g3, [%0+3]\n\t" : :
- "r" (clk_ctrl) :
- "g1", "g2", "g3");
-}
-
-static void clk_slow(void)
-{
- __asm__ __volatile__("mov 0xcc, %%g2\n\t"
- "mov 0x4c, %%g3\n\t"
- "mov 0xcf, %%g4\n\t"
- "mov 0xdf, %%g5\n\t"
- "stb %%g2, [%0+3]\n\t"
- "stb %%g3, [%0+3]\n\t"
- "stb %%g4, [%0+3]\n\t"
- "stb %%g5, [%0+3]\n\t" : :
- "r" (clk_ctrl) :
- "g2", "g3", "g4", "g5");
-}
-
-/*
- * Tadpole is guaranteed to be UP, using local_irq_save.
- */
-static void tsu_clockstop(void)
-{
- unsigned int mcsr;
- unsigned long flags;
-
- if (!clk_ctrl)
- return;
- if (!(clk_state & CLOCK_INIT_DONE)) {
- local_irq_save(flags);
- clk_init();
- clk_state |= CLOCK_INIT_DONE; /* all done */
- local_irq_restore(flags);
- return;
- }
- if (!(clk_ctrl[2] & 1))
- return; /* no speed up yet */
-
- local_irq_save(flags);
-
- /* if SCSI DMA in progress, don't slow clock */
- mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
- if ((mcsr&MACIO_EN_DMA) != 0) {
- local_irq_restore(flags);
- return;
- }
- /* TODO... the minimum clock setting ought to increase the
- * memory refresh interval..
- */
- clk_slow();
- local_irq_restore(flags);
-}
-
-static void swift_clockstop(void)
-{
- if (!clk_ctrl)
- return;
- clk_ctrl[0] = 0;
-}
-
-void __init clock_stop_probe(void)
-{
- phandle node, clk_nd;
- char name[20];
-
- prom_getstring(prom_root_node, "name", name, sizeof(name));
- if (strncmp(name, "Tadpole", 7))
- return;
- node = prom_getchild(prom_root_node);
- node = prom_searchsiblings(node, "obio");
- node = prom_getchild(node);
- clk_nd = prom_searchsiblings(node, "clk-ctrl");
- if (!clk_nd)
- return;
- printk("Clock Stopping h/w detected... ");
- clk_ctrl = (char *) prom_getint(clk_nd, "address");
- clk_state = 0;
- if (name[10] == '\0') {
- cpu_pwr_save = tsu_clockstop;
- printk("enabled (S3)\n");
- } else if ((name[10] == 'X') || (name[10] == 'G')) {
- cpu_pwr_save = swift_clockstop;
- printk("enabled (%s)\n",name+7);
- } else
- printk("disabled %s\n",name+7);
-}
diff --git a/arch/sparc/kernel/termios.c b/arch/sparc/kernel/termios.c
new file mode 100644
index 000000000000..ee64965c27cd
--- /dev/null
+++ b/arch/sparc/kernel/termios.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/termios_internal.h>
+
+/*
+ * c_cc characters in the termio structure. Oh, how I love being
+ * backwardly compatible. Notice that character 4 and 5 are
+ * interpreted differently depending on whether ICANON is set in
+ * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
+ * as _VMIN and _VTIME. This is for compatibility with OSF/1 (which
+ * is compatible with sysV)...
+ */
+#define _VMIN 4
+#define _VTIME 5
+
+int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ struct termio v;
+ memset(&v, 0, sizeof(struct termio));
+ v.c_iflag = termios->c_iflag;
+ v.c_oflag = termios->c_oflag;
+ v.c_cflag = termios->c_cflag;
+ v.c_lflag = termios->c_lflag;
+ v.c_line = termios->c_line;
+ memcpy(v.c_cc, termios->c_cc, NCC);
+ if (!(v.c_lflag & ICANON)) {
+ v.c_cc[_VMIN] = termios->c_cc[VMIN];
+ v.c_cc[_VTIME] = termios->c_cc[VTIME];
+ }
+ return copy_to_user(termio, &v, sizeof(struct termio));
+}
+
+int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ int err;
+ err = get_user(k->c_iflag, &u->c_iflag);
+ err |= get_user(k->c_oflag, &u->c_oflag);
+ err |= get_user(k->c_cflag, &u->c_cflag);
+ err |= get_user(k->c_lflag, &u->c_lflag);
+ err |= get_user(k->c_line, &u->c_line);
+ err |= copy_from_user(k->c_cc, u->c_cc, NCCS);
+ if (k->c_lflag & ICANON) {
+ err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
+ err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
+ } else {
+ err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
+ err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
+ }
+ err |= get_user(k->c_ispeed, &u->c_ispeed);
+ err |= get_user(k->c_ospeed, &u->c_ospeed);
+ return err;
+}
+
+int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ int err;
+ err = put_user(k->c_iflag, &u->c_iflag);
+ err |= put_user(k->c_oflag, &u->c_oflag);
+ err |= put_user(k->c_cflag, &u->c_cflag);
+ err |= put_user(k->c_lflag, &u->c_lflag);
+ err |= put_user(k->c_line, &u->c_line);
+ err |= copy_to_user(u->c_cc, k->c_cc, NCCS);
+ if (!(k->c_lflag & ICANON)) {
+ err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
+ err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
+ } else {
+ err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
+ err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
+ }
+ err |= put_user(k->c_ispeed, &u->c_ispeed);
+ err |= put_user(k->c_ospeed, &u->c_ospeed);
+ return err;
+}
+
+int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ int err;
+ err = get_user(k->c_iflag, &u->c_iflag);
+ err |= get_user(k->c_oflag, &u->c_oflag);
+ err |= get_user(k->c_cflag, &u->c_cflag);
+ err |= get_user(k->c_lflag, &u->c_lflag);
+ err |= get_user(k->c_line, &u->c_line);
+ err |= copy_from_user(k->c_cc, u->c_cc, NCCS);
+ if (k->c_lflag & ICANON) {
+ err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
+ err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
+ } else {
+ err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
+ err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
+ }
+ return err;
+}
+
+int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ int err;
+ err = put_user(k->c_iflag, &u->c_iflag);
+ err |= put_user(k->c_oflag, &u->c_oflag);
+ err |= put_user(k->c_cflag, &u->c_cflag);
+ err |= put_user(k->c_lflag, &u->c_lflag);
+ err |= put_user(k->c_line, &u->c_line);
+ err |= copy_to_user(u->c_cc, k->c_cc, NCCS);
+ if (!(k->c_lflag & ICANON)) {
+ err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
+ err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
+ } else {
+ err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
+ err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
+ }
+ return err;
+}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index c4c27b0f9063..578fd0d49f30 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@davemloft.net)
@@ -23,7 +24,6 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
-#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
@@ -33,9 +33,9 @@
#include <linux/ioport.h>
#include <linux/profile.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <asm/mc146818rtc.h>
#include <asm/oplib.h>
#include <asm/timex.h>
#include <asm/timer.h>
@@ -47,6 +47,7 @@
#include <asm/irq_regs.h>
#include <asm/setup.h>
+#include "kernel.h"
#include "irq.h"
static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
@@ -63,8 +64,6 @@ DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-static int set_rtc_mmss(unsigned long);
-
unsigned long profile_pc(struct pt_regs *regs)
{
extern char __copy_user_begin[], __copy_user_end[];
@@ -83,12 +82,7 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
-__volatile__ unsigned int *master_l10_counter;
-
-int update_persistent_clock(struct timespec now)
-{
- return set_rtc_mmss(now.tv_sec);
-}
+volatile u32 __iomem *master_l10_counter;
irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
{
@@ -107,21 +101,18 @@ irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static void timer_ce_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int timer_ce_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_RESUME:
- timer_ce_enabled = 1;
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- timer_ce_enabled = 0;
- break;
- default:
- break;
- }
+ timer_ce_enabled = 0;
smp_mb();
+ return 0;
+}
+
+static int timer_ce_set_periodic(struct clock_event_device *evt)
+{
+ timer_ce_enabled = 1;
+ smp_mb();
+ return 0;
}
static __init void setup_timer_ce(void)
@@ -133,7 +124,9 @@ static __init void setup_timer_ce(void)
ce->name = "timer_ce";
ce->rating = 100;
ce->features = CLOCK_EVT_FEAT_PERIODIC;
- ce->set_mode = timer_ce_set_mode;
+ ce->set_state_shutdown = timer_ce_shutdown;
+ ce->set_state_periodic = timer_ce_set_periodic;
+ ce->tick_resume = timer_ce_set_periodic;
ce->cpumask = cpu_possible_mask;
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
@@ -143,9 +136,9 @@ static __init void setup_timer_ce(void)
static unsigned int sbus_cycles_offset(void)
{
- unsigned int val, offset;
+ u32 val, offset;
- val = *master_l10_counter;
+ val = sbus_readl(master_l10_counter);
offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
/* Limit hit? */
@@ -155,7 +148,7 @@ static unsigned int sbus_cycles_offset(void)
return offset;
}
-static cycle_t timer_cs_read(struct clocksource *cs)
+static u64 timer_cs_read(struct clocksource *cs)
{
unsigned int seq, offset;
u64 cycles;
@@ -179,44 +172,36 @@ static struct clocksource timer_cs = {
.rating = 100,
.read = timer_cs_read,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 2,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static __init int setup_timer_cs(void)
{
timer_cs_enabled = 1;
- timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
- timer_cs.shift);
-
- return clocksource_register(&timer_cs);
+ return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
}
#ifdef CONFIG_SMP
-static void percpu_ce_setup(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int percpu_ce_shutdown(struct clock_event_device *evt)
{
- int cpu = __first_cpu(evt->cpumask);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- sparc_config.load_profile_irq(cpu,
- SBUS_CLOCK_RATE / HZ);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- sparc_config.load_profile_irq(cpu, 0);
- break;
- default:
- break;
- }
+ int cpu = cpumask_first(evt->cpumask);
+
+ sparc_config.load_profile_irq(cpu, 0);
+ return 0;
+}
+
+static int percpu_ce_set_periodic(struct clock_event_device *evt)
+{
+ int cpu = cpumask_first(evt->cpumask);
+
+ sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
+ return 0;
}
static int percpu_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- int cpu = __first_cpu(evt->cpumask);
+ int cpu = cpumask_first(evt->cpumask);
unsigned int next = (unsigned int)delta;
sparc_config.load_profile_irq(cpu, next);
@@ -234,14 +219,18 @@ void register_percpu_ce(int cpu)
ce->name = "percpu_ce";
ce->rating = 200;
ce->features = features;
- ce->set_mode = percpu_ce_setup;
+ ce->set_state_shutdown = percpu_ce_shutdown;
+ ce->set_state_periodic = percpu_ce_set_periodic;
+ ce->set_state_oneshot = percpu_ce_shutdown;
ce->set_next_event = percpu_ce_set_next_event;
ce->cpumask = cpumask_of(cpu);
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
ce->shift);
ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
+ ce->max_delta_ticks = (unsigned long)sparc_config.clock_rate;
ce->min_delta_ns = clockevent_delta2ns(100, ce);
+ ce->min_delta_ticks = 100;
clockevents_register_device(ce);
}
@@ -266,6 +255,7 @@ static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
static struct m48t59_plat_data m48t59_data = {
.read_byte = mostek_read_byte,
.write_byte = mostek_write_byte,
+ .yy_offset = 68,
};
/* resource is set at runtime */
@@ -287,7 +277,7 @@ static int clock_probe(struct platform_device *op)
return -ENODEV;
/* Only the primary RTC has an address property */
- if (!of_find_property(dp, "address", NULL))
+ if (!of_property_present(dp, "address"))
return -ENODEV;
m48t59_rtc.resource = &op->resource[0];
@@ -309,7 +299,7 @@ static int clock_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id clock_match[] = {
+static const struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
@@ -320,7 +310,6 @@ static struct platform_driver clock_driver = {
.probe = clock_probe,
.driver = {
.name = "rtc",
- .owner = THIS_MODULE,
.of_match_table = clock_match,
},
};
@@ -365,16 +354,3 @@ void __init time_init(void)
sbus_time_init();
}
-
-static int set_rtc_mmss(unsigned long secs)
-{
- struct rtc_device *rtc = rtc_class_open("rtc0");
- int err = -1;
-
- if (rtc) {
- err = rtc_set_mmss(rtc, secs);
- rtc_class_close(rtc);
- }
-
- return err;
-}
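The time_32.c hunks above replace the removed set_mode() multiplexer with the per-state clockevents callbacks (set_state_shutdown, set_state_periodic, tick_resume, each returning int) and let clocksource_register_hz() derive mult/shift instead of open-coding clocksource_hz2mult(). A minimal sketch of the new-style registration, with a hypothetical "demo" timer standing in for the sparc_config hooks:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/time.h>

static int demo_timer_enabled;		/* stand-in for timer_ce_enabled */

static int demo_ce_shutdown(struct clock_event_device *evt)
{
	demo_timer_enabled = 0;		/* stop the hardware here */
	return 0;
}

static int demo_ce_set_periodic(struct clock_event_device *evt)
{
	demo_timer_enabled = 1;		/* (re)arm periodic interrupts here */
	return 0;
}

static struct clock_event_device demo_ce = {
	.name			= "demo_ce",
	.rating			= 100,
	.features		= CLOCK_EVT_FEAT_PERIODIC,
	.set_state_shutdown	= demo_ce_shutdown,
	.set_state_periodic	= demo_ce_set_periodic,
	.tick_resume		= demo_ce_set_periodic,
};

static void demo_setup_ce(unsigned long clock_rate_hz)
{
	demo_ce.cpumask = cpu_possible_mask;
	demo_ce.shift = 32;
	demo_ce.mult = div_sc(clock_rate_hz, NSEC_PER_SEC, demo_ce.shift);
	clockevents_register_device(&demo_ce);
}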
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index c3d82b5f54ca..b32f27f929d1 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
@@ -27,14 +28,12 @@
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
-#include <linux/miscdevice.h>
-#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
#include <linux/ftrace.h>
#include <asm/oplib.h>
@@ -46,16 +45,15 @@
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/cpudata.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq_regs.h>
+#include <asm/cacheflush.h>
#include "entry.h"
+#include "kernel.h"
DEFINE_SPINLOCK(rtc_lock);
-#define TICK_PRIV_BIT (1UL << 63)
-#define TICKCMP_IRQ_BIT (1UL << 63)
-
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -165,13 +163,44 @@ static unsigned long tick_add_tick(unsigned long adj)
return new_tick;
}
-static struct sparc64_tick_ops tick_operations __read_mostly = {
+/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */
+static unsigned long cpuid_to_freq(phandle node, int cpuid)
+{
+ bool is_cpu_node = false;
+ unsigned long freq = 0;
+ char type[128];
+
+ if (!node)
+ return freq;
+
+ if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
+ is_cpu_node = (strcmp(type, "cpu") == 0);
+
+ /* try upa-portid then cpuid to get cpuid, see prom_64.c */
+ if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
+ prom_getint(node, "cpuid") == cpuid))
+ freq = prom_getintdefault(node, "clock-frequency", 0);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getchild(node), cpuid);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getsibling(node), cpuid);
+
+ return freq;
+}
+
+static unsigned long tick_get_frequency(void)
+{
+ return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
+}
+
+static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
.name = "tick",
.init_tick = tick_init_tick,
.disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
+ .get_frequency = tick_get_frequency,
.softint_mask = 1UL << 0,
};
@@ -251,6 +280,11 @@ static int stick_add_compare(unsigned long adj)
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
+static unsigned long stick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops stick_operations __read_mostly = {
.name = "stick",
.init_tick = stick_init_tick,
@@ -258,6 +292,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
.get_tick = stick_get_tick,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
+ .get_frequency = stick_get_frequency,
.softint_mask = 1UL << 16,
};
@@ -278,9 +313,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
* 2) write high
* 3) write low
*/
-#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
-#define HBIRD_STICK_ADDR 0x1fe0000f070UL
-
static unsigned long __hbird_read_stick(void)
{
unsigned long ret, tmp1, tmp2, tmp3;
@@ -382,6 +414,11 @@ static int hbtick_add_compare(unsigned long adj)
return ((long)(val2 - val)) > 0L;
}
+static unsigned long hbtick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.name = "hbtick",
.init_tick = hbtick_init_tick,
@@ -389,24 +426,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.get_tick = hbtick_get_tick,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
+ .get_frequency = hbtick_get_frequency,
.softint_mask = 1UL << 0,
};
-static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-
-int update_persistent_clock(struct timespec now)
-{
- struct rtc_device *rtc = rtc_class_open("rtc0");
- int err = -1;
-
- if (rtc) {
- err = rtc_set_mmss(rtc, now.tv_sec);
- rtc_class_close(rtc);
- }
-
- return err;
-}
-
unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);
@@ -423,8 +446,8 @@ static int rtc_probe(struct platform_device *op)
{
struct resource *r;
- printk(KERN_INFO "%s: RTC regs at 0x%llx\n",
- op->dev.of_node->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n",
+ op->dev.of_node, op->resource[0].start);
/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
* up a fake resource so that the probe works for all cases.
@@ -466,7 +489,6 @@ static struct platform_driver rtc_driver = {
.probe = rtc_probe,
.driver = {
.name = "rtc",
- .owner = THIS_MODULE,
.of_match_table = rtc_match,
},
};
@@ -480,8 +502,8 @@ static struct platform_device rtc_bq4802_device = {
static int bq4802_probe(struct platform_device *op)
{
- printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
- op->dev.of_node->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n",
+ op->dev.of_node, op->resource[0].start);
rtc_bq4802_device.resource = &op->resource[0];
return platform_device_register(&rtc_bq4802_device);
@@ -499,7 +521,6 @@ static struct platform_driver bq4802_driver = {
.probe = bq4802_probe,
.driver = {
.name = "bq4802",
- .owner = THIS_MODULE,
.of_match_table = bq4802_match,
},
};
@@ -523,6 +544,7 @@ static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
static struct m48t59_plat_data m48t59_data = {
.read_byte = mostek_read_byte,
.write_byte = mostek_write_byte,
+ .yy_offset = 68,
};
static struct platform_device m48t59_rtc = {
@@ -541,12 +563,12 @@ static int mostek_probe(struct platform_device *op)
/* On an Enterprise system there can be multiple mostek clocks.
* We should only match the one that is on the central FHC bus.
*/
- if (!strcmp(dp->parent->name, "fhc") &&
- strcmp(dp->parent->parent->name, "central") != 0)
+ if (of_node_name_eq(dp->parent, "fhc") &&
+ !of_node_name_eq(dp->parent->parent, "central"))
return -ENODEV;
- printk(KERN_INFO "%s: Mostek regs at 0x%llx\n",
- dp->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n",
+ dp, op->resource[0].start);
m48t59_rtc.resource = &op->resource[0];
return platform_device_register(&m48t59_rtc);
@@ -563,7 +585,6 @@ static struct platform_driver mostek_driver = {
.probe = mostek_probe,
.driver = {
.name = "mostek",
- .owner = THIS_MODULE,
.of_match_table = mostek_match,
},
};
@@ -599,34 +620,17 @@ static int __init clock_init(void)
*/
fs_initcall(clock_init);
-/* This is gets the master TICK_INT timer going. */
-static unsigned long sparc64_init_timers(void)
+/* Return true if this is Hummingbird, aka Ultra-IIe */
+static bool is_hummingbird(void)
{
- struct device_node *dp;
- unsigned long freq;
+ unsigned long ver, manuf, impl;
- dp = of_find_node_by_path("/");
- if (tlb_type == spitfire) {
- unsigned long ver, manuf, impl;
-
- __asm__ __volatile__ ("rdpr %%ver, %0"
- : "=&r" (ver));
- manuf = ((ver >> 48) & 0xffff);
- impl = ((ver >> 32) & 0xffff);
- if (manuf == 0x17 && impl == 0x13) {
- /* Hummingbird, aka Ultra-IIe */
- tick_ops = &hbtick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- } else {
- tick_ops = &tick_operations;
- freq = local_cpu_data().clock_tick;
- }
- } else {
- tick_ops = &stick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- }
+ __asm__ __volatile__ ("rdpr %%ver, %0"
+ : "=&r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
- return freq;
+ return (manuf == 0x17 && impl == 0x13);
}
struct freq_table {
@@ -651,20 +655,23 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned int cpu = freq->cpu;
- struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+ unsigned int cpu;
+ struct freq_table *ft;
- if (!ft->ref_freq) {
- ft->ref_freq = freq->old;
- ft->clock_tick_ref = cpu_data(cpu).clock_tick;
- }
- if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
- (val == CPUFREQ_RESUMECHANGE)) {
- cpu_data(cpu).clock_tick =
- cpufreq_scale(ft->clock_tick_ref,
- ft->ref_freq,
- freq->new);
+ for_each_cpu(cpu, freq->policy->cpus) {
+ ft = &per_cpu(sparc64_freq_table, cpu);
+
+ if (!ft->ref_freq) {
+ ft->ref_freq = freq->old;
+ ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ cpu_data(cpu).clock_tick =
+ cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
+ freq->new);
+ }
}
return 0;
@@ -689,42 +696,29 @@ core_initcall(register_sparc64_cpufreq_notifier);
static int sparc64_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- return tick_ops->add_compare(delta) ? -ETIME : 0;
+ return tick_operations.add_compare(delta) ? -ETIME : 0;
}
-static void sparc64_timer_setup(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int sparc64_timer_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- tick_ops->disable_irq();
- break;
-
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_UNUSED:
- WARN_ON(1);
- break;
- }
+ tick_operations.disable_irq();
+ return 0;
}
static struct clock_event_device sparc64_clockevent = {
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sparc64_timer_setup,
- .set_next_event = sparc64_next_event,
- .rating = 100,
- .shift = 30,
- .irq = -1,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sparc64_timer_shutdown,
+ .set_next_event = sparc64_next_event,
+ .rating = 100,
+ .shift = 30,
+ .irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned long tick_mask = tick_ops->softint_mask;
+ unsigned long tick_mask = tick_operations.softint_mask;
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
@@ -733,7 +727,7 @@ void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
irq_enter();
local_cpu_data().irq0_irqs++;
- kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
+ kstat_incr_irq_this_cpu(0);
if (unlikely(!evt->event_handler)) {
printk(KERN_WARNING
@@ -759,14 +753,14 @@ void setup_sparc64_timer(void)
: "=r" (pstate)
: "i" (PSTATE_IE));
- tick_ops->init_tick();
+ tick_operations.init_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: /* no outputs */
: "r" (pstate));
- sevt = &__get_cpu_var(sparc64_events);
+ sevt = this_cpu_ptr(&sparc64_events);
memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
sevt->cpumask = cpumask_of(smp_processor_id());
@@ -786,12 +780,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
- unsigned long bclock, now;
+ unsigned long bclock = get_tick();
- bclock = tick_ops->get_tick();
- do {
- now = tick_ops->get_tick();
- } while ((now-bclock) < loops);
+ while ((get_tick() - bclock) < loops)
+ ;
}
EXPORT_SYMBOL(__delay);
@@ -801,34 +793,85 @@ void udelay(unsigned long usecs)
}
EXPORT_SYMBOL(udelay);
-static cycle_t clocksource_tick_read(struct clocksource *cs)
+static u64 clocksource_tick_read(struct clocksource *cs)
+{
+ return get_tick();
+}
+
+static void __init get_tick_patch(void)
+{
+ unsigned int *addr, *instr, i;
+ struct get_tick_patch *p;
+
+ if (tlb_type == spitfire && is_hummingbird())
+ return;
+
+ for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
+ instr = (tlb_type == spitfire) ? p->tick : p->stick;
+ addr = (unsigned int *)(unsigned long)p->addr;
+ for (i = 0; i < GET_TICK_NINSTR; i++) {
+ addr[i] = instr[i];
+ /* ensure that address is modified before flush */
+ wmb();
+ flushi(&addr[i]);
+ }
+ }
+}
+
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
- return tick_ops->get_tick();
+ unsigned long freq, quotient, tick;
+
+ freq = ops->get_frequency();
+ quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
+ tick = ops->get_tick();
+
+ ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
+ ops->ticks_per_nsec_quotient = quotient;
+ ops->frequency = freq;
+ tick_operations = *ops;
+ get_tick_patch();
+}
+
+void __init time_init_early(void)
+{
+ if (tlb_type == spitfire) {
+ if (is_hummingbird()) {
+ init_tick_ops(&hbtick_operations);
+ clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
+ } else {
+ init_tick_ops(&tick_operations);
+ clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
+ }
+ } else {
+ init_tick_ops(&stick_operations);
+ clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
+ }
}
void __init time_init(void)
{
- unsigned long freq = sparc64_init_timers();
+ unsigned long freq;
+ freq = tick_operations.frequency;
tb_ticks_per_usec = freq / USEC_PER_SEC;
- timer_ticks_per_nsec_quotient =
- clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
-
- clocksource_tick.name = tick_ops->name;
+ clocksource_tick.name = tick_operations.name;
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- sparc64_clockevent.name = tick_ops->name;
+ sparc64_clockevent.name = tick_operations.name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
+ sparc64_clockevent.max_delta_ticks = 0x7fffffffffffffffUL;
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
+ sparc64_clockevent.min_delta_ticks = 0xF;
printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
@@ -838,14 +881,21 @@ void __init time_init(void)
unsigned long long sched_clock(void)
{
- unsigned long ticks = tick_ops->get_tick();
+ unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
+ unsigned long offset = tick_operations.offset;
+
+ /* Use barrier so the compiler emits the loads first and overlaps load
+ * latency with reading tick, because reading %tick/%stick is a
+ * post-sync instruction that will flush and restart subsequent
+ * instructions after it commits.
+ */
+ barrier();
- return (ticks * timer_ticks_per_nsec_quotient)
- >> SPARC64_NSEC_PER_CYC_SHIFT;
+ return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
int read_current_timer(unsigned long *timer_val)
{
- *timer_val = tick_ops->get_tick();
+ *timer_val = get_tick();
return 0;
}
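The sched_clock()/init_tick_ops() rework above converts raw %tick/%stick counts to nanoseconds with a fixed-point multiplier precomputed by clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT), and subtracts an offset captured at init so the clock starts near zero. A standalone userspace model of that arithmetic (the shift is assumed to be 10 here and the tick frequency is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define CYC_SHIFT	10		/* assumed SPARC64_NSEC_PER_CYC_SHIFT */

/* Same idea as clocksource_hz2mult(): ns = (cycles * mult) >> shift. */
static uint64_t hz2mult(uint64_t hz, unsigned int shift)
{
	return ((NSEC_PER_SEC << shift) + hz / 2) / hz;
}

int main(void)
{
	uint64_t freq = 1000000000ULL;		/* pretend 1 GHz %stick */
	uint64_t quotient = hz2mult(freq, CYC_SHIFT);
	uint64_t boot_tick = 123456789ULL;	/* counter sampled at init */
	uint64_t offset = (boot_tick * quotient) >> CYC_SHIFT;
	uint64_t now_tick = boot_tick + 2 * freq;	/* two seconds later */

	printf("ns since init: %llu\n",
	       (unsigned long long)(((now_tick * quotient) >> CYC_SHIFT) - offset));
	return 0;
}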
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 6cdb08cdabf0..82fafeeb3a62 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* trampoline.S: SMP cpu boot-up trampoline code.
*
@@ -5,7 +6,6 @@
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <linux/init.h>
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/page.h>
@@ -18,7 +18,6 @@
.globl sun4m_cpu_startup
.globl sun4d_cpu_startup
- __CPUINIT
.align 4
/* When we start up a cpu for the first time it enters this routine.
@@ -94,7 +93,6 @@ smp_panic:
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
- __CPUINIT
.align 4
sun4d_cpu_startup:
@@ -146,7 +144,6 @@ sun4d_cpu_startup:
b,a smp_panic
- __CPUINIT
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a26fbda..51bf1eb92a36 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
@@ -13,7 +14,6 @@
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
@@ -32,13 +32,11 @@ itlb_load:
dtlb_load:
.asciz "SUNW,dtlb-load"
- /* XXX __cpuinit this thing XXX */
#define TRAMP_STACK_SIZE 1024
.align 16
tramp_stack:
.skip TRAMP_STACK_SIZE
- __CPUINIT
.align 8
.globl sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
@@ -112,10 +110,13 @@ startup_continue:
brnz,pn %g1, 1b
nop
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x10], %l2
- add %l2, -(192 + 128), %sp
+ /* Get onto temporary stack which will be in the locked
+ * kernel image.
+ */
+ sethi %hi(tramp_stack), %g1
+ or %g1, %lo(tramp_stack), %g1
+ add %g1, TRAMP_STACK_SIZE, %g1
+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
flushw
/* Setup the loop variables:
@@ -131,7 +132,6 @@ startup_continue:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
mov 15, %l7
BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
@@ -224,7 +224,6 @@ niagara_lock_tlb:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
@@ -399,7 +398,6 @@ after_lock_tlb:
sllx %g5, THREAD_SHIFT, %g5
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
- mov 0, %fp
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 662982946a89..bb149f6cc34b 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* arch/sparc/kernel/traps.c
*
@@ -9,18 +10,21 @@
* I hate traps on the sparc, grrr...
*/
-#include <linux/sched.h> /* for jiffies */
+#include <linux/cpu.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/pgtable.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/traps.h>
@@ -44,7 +48,7 @@ static void instruction_dump(unsigned long *pc)
#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
int count = 0;
@@ -83,15 +87,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
- if(regs->psr & PSR_PS)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
{
- siginfo_t info;
-
if(type < 0x80) {
/* Sun OS's puke from bad traps, Linux survives! */
printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
@@ -101,19 +101,13 @@ void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
if(regs->psr & PSR_PS)
die_if_kernel("Kernel bad trap", regs);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)regs->pc;
- info.si_trapno = type - 0x80;
- force_sig_info(SIGILL, &info, current);
+ force_sig_fault_trapno(SIGILL, ILL_ILLTRP,
+ (void __user *)regs->pc, type - 0x80);
}
void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
if(psr & PSR_PS)
die_if_kernel("Kernel illegal instruction", regs);
#ifdef TRAP_DEBUG
@@ -121,27 +115,15 @@ void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned lon
regs->pc, *(unsigned long *)regs->pc);
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGILL, &info, current);
+ send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current);
}
void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
if(psr & PSR_PS)
die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_PRVOPC;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGILL, &info, current);
+ send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current);
}
/* XXX User may want to be allowed to do this. XXX */
@@ -149,8 +131,6 @@ void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long n
void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
if(regs->psr & PSR_PS) {
printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
regs->u_regs[UREG_RETPC]);
@@ -162,12 +142,9 @@ void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned lon
instruction_dump ((unsigned long *) regs->pc);
printk ("do_MNA!\n");
#endif
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = /* FIXME: Should dig out mna address */ (void *)0;
- info.si_trapno = 0;
- send_sig_info(SIGBUS, &info, current);
+ send_sig_fault(SIGBUS, BUS_ADRALN,
+ /* FIXME: Should dig out mna address */ (void *)0,
+ current);
}
static unsigned long init_fsr = 0x0UL;
@@ -219,15 +196,13 @@ static unsigned long fake_fsr;
static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
static unsigned long fake_depth;
-extern int do_mathemu(struct pt_regs *, struct task_struct *);
-
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
static int calls;
- siginfo_t info;
unsigned long fsr;
int ret = 0;
+ int code;
#ifndef CONFIG_SMP
struct task_struct *fpt = last_task_used_math;
#else
@@ -302,24 +277,20 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
}
fsr = fpt->thread.fsr;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- info.si_code = __SI_FAULT;
+ code = FPE_FLTUNK;
if ((fsr & 0x1c000) == (1 << 14)) {
if (fsr & 0x10)
- info.si_code = FPE_FLTINV;
+ code = FPE_FLTINV;
else if (fsr & 0x08)
- info.si_code = FPE_FLTOVF;
+ code = FPE_FLTOVF;
else if (fsr & 0x04)
- info.si_code = FPE_FLTUND;
+ code = FPE_FLTUND;
else if (fsr & 0x02)
- info.si_code = FPE_FLTDIV;
+ code = FPE_FLTDIV;
else if (fsr & 0x01)
- info.si_code = FPE_FLTRES;
+ code = FPE_FLTRES;
}
- send_sig_info(SIGFPE, &info, fpt);
+ send_sig_fault(SIGFPE, code, (void __user *)pc, fpt);
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#endif
@@ -331,16 +302,9 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
if(psr & PSR_PS)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
- info.si_signo = SIGEMT;
- info.si_errno = 0;
- info.si_code = EMT_TAGOVF;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGEMT, &info, current);
+ send_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)pc, current);
}
void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
@@ -358,61 +322,33 @@ void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc
void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
#ifdef TRAP_DEBUG
printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
#endif
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)pc);
}
void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_COPROC;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGILL, &info, current);
+ send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current);
}
void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
#ifdef TRAP_DEBUG
printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_COPROC;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGILL, &info, current);
+ send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current);
}
void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- siginfo_t info;
-
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = FPE_INTDIV;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- send_sig_info(SIGFPE, &info, current);
+ send_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, current);
}
#ifdef CONFIG_DEBUG_BUGVERBOSE
@@ -435,7 +371,6 @@ void trap_init(void)
/* Force linker to barf if mismatched */
if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
TI_TASK != offsetof(struct thread_info, task) ||
- TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
@@ -451,7 +386,7 @@ void trap_init(void)
thread_info_offsets_are_bolixed_pete();
/* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
/* NOTE: Other cpus have this done as they are started
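The traps_32.c conversion above drops the hand-rolled siginfo_t setup in favour of the generic fault-signal helpers: force_sig_fault()/send_sig_fault() take the signal number, si_code and fault address (send_sig_fault() also takes the target task), and force_sig_fault_trapno() additionally records si_trapno. A minimal sketch of the resulting call shape, with hypothetical wrapper names:

#include <linux/sched/signal.h>
#include <linux/signal.h>

/* Sketch only: hypothetical wrappers showing the helper-based signal
 * delivery that replaces the open-coded siginfo_t fills above.
 */
static void demo_report_illegal_insn(unsigned long pc)
{
	/* Equivalent to filling si_signo/si_code/si_addr by hand and
	 * calling force_sig_info(SIGILL, &info, current).
	 */
	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc);
}

static void demo_report_bad_trap(unsigned long pc, unsigned long type)
{
	/* Variant that also fills si_trapno, as do_hw_interrupt() does. */
	force_sig_fault_trapno(SIGILL, ILL_ILLTRP,
			       (void __user *)pc, type - 0x80);
}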
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index b3f833ab90eb..28cb0d66ab40 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
@@ -8,27 +9,30 @@
* I like traps on v9, :))))
*/
-#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/extable.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
+#include <linux/context_tracking.h>
#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
@@ -42,8 +46,10 @@
#include <asm/prom.h>
#include <asm/memctrl.h>
#include <asm/cacheflush.h>
+#include <asm/setup.h>
#include "entry.h"
+#include "kernel.h"
#include "kstack.h"
/* When an irrecoverable trap occurs at tl > 0, the trap entry
@@ -82,8 +88,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
void bad_trap(struct pt_regs *regs, long lvl)
{
- char buffer[32];
- siginfo_t info;
+ char buffer[36];
if (notify_die(DIE_TRAP, "bad trap", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
@@ -103,17 +108,13 @@ void bad_trap(struct pt_regs *regs, long lvl)
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = lvl;
- force_sig_info(SIGILL, &info, current);
+ force_sig_fault_trapno(SIGILL, ILL_ILLTRP,
+ (void __user *)regs->tpc, lvl);
}
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
@@ -186,11 +187,11 @@ EXPORT_SYMBOL_GPL(unregister_dimm_printer);
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "instruction access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto out;
if (regs->tstate & TSTATE_PRIV) {
printk("spitfire_insn_access_exception: SFSR[%016lx] "
@@ -201,12 +202,9 @@ void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, un
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- force_sig_info(SIGSEGV, &info, current);
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)regs->tpc);
+out:
+ exception_exit(prev_state);
}
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
@@ -223,7 +221,6 @@ void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsig
{
unsigned short type = (type_ctx >> 16);
unsigned short ctx = (type_ctx & 0xffff);
- siginfo_t info;
if (notify_die(DIE_TRAP, "instruction access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
@@ -240,12 +237,7 @@ void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsig
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *) addr;
- info.si_trapno = 0;
- force_sig_info(SIGSEGV, &info, current);
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr);
}
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
@@ -258,13 +250,51 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u
sun4v_insn_access_exception(regs, addr, type_ctx);
}
+static bool is_no_fault_exception(struct pt_regs *regs)
+{
+ unsigned char asi;
+ u32 insn;
+
+ if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
+ return false;
+
+ /*
+ * Must do a little instruction decoding here in order to
+ * decide on a course of action. The bits of interest are:
+ * insn[31:30] = op, where 3 indicates the load/store group
+ * insn[24:19] = op3, which identifies individual opcodes
+ * insn[13] indicates an immediate offset
+ * op3[4]=1 identifies alternate space instructions
+ * op3[5:4]=3 identifies floating point instructions
+ * op3[2]=1 identifies stores
+ * See "Opcode Maps" in the appendix of any Sparc V9
+ * architecture spec for full details.
+ */
+ if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */
+ if (insn & 0x2000) /* immediate offset */
+ asi = (regs->tstate >> 24); /* saved %asi */
+ else
+ asi = (insn >> 5); /* immediate asi */
+ if ((asi & 0xf6) == ASI_PNF) {
+ if (insn & 0x200000) /* op3[2], stores */
+ return false;
+ if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
+ handle_ldf_stq(insn, regs);
+ else
+ handle_ld_nf(insn, regs);
+ return true;
+ }
+ }
+ return false;
+}
+
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "data access exception", regs,
0, 0x30, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto out;
if (regs->tstate & TSTATE_PRIV) {
/* Test if this comes from uaccess places. */
@@ -280,7 +310,7 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
#endif
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- return;
+ goto out;
}
/* Shit... */
printk("spitfire_data_access_exception: SFSR[%016lx] "
@@ -288,12 +318,12 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
die_if_kernel("Dax", regs);
}
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)sfar;
- info.si_trapno = 0;
- force_sig_info(SIGSEGV, &info, current);
+ if (is_no_fault_exception(regs))
+ return;
+
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar);
+out:
+ exception_exit(prev_state);
}
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
@@ -310,7 +340,6 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig
{
unsigned short type = (type_ctx >> 16);
unsigned short ctx = (type_ctx & 0xffff);
- siginfo_t info;
if (notify_die(DIE_TRAP, "data access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
@@ -342,12 +371,29 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *) addr;
- info.si_trapno = 0;
- force_sig_info(SIGSEGV, &info, current);
+ if (is_no_fault_exception(regs))
+ return;
+
+ /* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
+ * is vectored through data access exception trap with fault type
+ * set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
+ * Accessing an address with invalid ASI for the address, for
+ * example setting an ADI tag on an address with ASI_MCD_PRIMARY
+ * when TTE.mcd is not set for the VA, is also vectored into
+ * kernel by HV as data access exception with fault type set to
+ * HV_FAULT_TYPE_INV_ASI.
+ */
+ switch (type) {
+ case HV_FAULT_TYPE_INV_ASI:
+ force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr);
+ break;
+ case HV_FAULT_TYPE_MCD_DIS:
+ force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr);
+ break;
+ default:
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr);
+ break;
+ }
}
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
@@ -488,8 +534,6 @@ static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned lo
static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
- siginfo_t info;
-
printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
"AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
@@ -524,12 +568,7 @@ static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned lon
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void *)0;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0);
}
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
@@ -859,7 +898,7 @@ void __init cheetah_ecache_flush_init(void)
/* Now allocate error trap reporting scoreboard. */
sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
- for (order = 0; order < MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
if ((PAGE_SIZE << order) >= sz)
break;
}
@@ -1792,6 +1831,7 @@ struct sun4v_error_entry {
#define SUN4V_ERR_ATTRS_ASI 0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG 0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK 0x00000600
+#define SUN4V_ERR_ATTRS_MCD 0x00000800
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT 9
#define SUN4V_ERR_ATTRS_MODE_MSK 0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT 24
@@ -1989,11 +2029,55 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
}
}
+/* Handle memory corruption detected error which is vectored in
+ * through resumable error trap.
+ */
+static void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
+{
+ if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (regs->tstate & TSTATE_PRIV) {
+ /* MCD exception could happen because the task was
+ * running a system call with MCD enabled and passed a
+ * non-versioned pointer or pointer with bad version
+ * tag to the system call. In such cases, hypervisor
+ * places the address of offending instruction in the
+ * resumable error report. This is a deferred error,
+ * so the read/write that caused the trap was potentially
+ * retired long time back and we may have no choice
+ * but to send SIGSEGV to the process.
+ */
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ /* Looks like a bad syscall parameter */
+#ifdef DEBUG_EXCEPTIONS
+ pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
+ regs->tpc);
+ pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+ ent.err_raddr, entry->fixup);
+#endif
+ regs->tpc = entry->fixup;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+ }
+
+ /* Send SIGSEGV to the userspace process with the right signal
+ * code
+ */
+ force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr);
+}
+
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event and clear the first word of the entry.
*/
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
+ enum ctx_state prev_state = exception_enter();
struct sun4v_error_entry *ent, local_copy;
struct trap_per_cpu *tb;
unsigned long paddr;
@@ -2022,12 +2106,22 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
pr_info("Shutdown request, %u seconds...\n",
local_copy.err_secs);
orderly_poweroff(true);
+ goto out;
+ }
+
+ /* If this is a memory corruption detected error vectored in
+ * by HV through resumable error trap, call the handler
+ */
+ if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
+ do_mcd_err(regs, local_copy);
return;
}
sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
+out:
+ exception_exit(prev_state);
}
/* If we try to printk() we'll probably make matters worse, by trying
@@ -2039,6 +2133,64 @@ void sun4v_resum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_resum_oflow_cnt);
}
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+ unsigned int insn;
+
+ if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+ return compute_effective_address(regs, insn,
+ (insn >> 25) & 0x1f);
+ }
+ return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+static bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+ struct sun4v_error_entry *ent)
+{
+ unsigned int attrs = ent->err_attrs;
+
+ if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+ unsigned long addr = ent->err_raddr;
+
+ if (addr == ~(u64)0) {
+ /* This seems highly unlikely to ever occur */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+ } else {
+ unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+ PAGE_SIZE);
+
+ /* Break the unfortunate news. */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+ addr);
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
+ page_cnt);
+
+ while (page_cnt-- > 0) {
+ if (pfn_valid(addr >> PAGE_SHIFT))
+ get_page(pfn_to_page(addr >> PAGE_SHIFT));
+ addr += PAGE_SIZE;
+ }
+ }
+ force_sig(SIGKILL);
+
+ return true;
+ }
+ if (attrs & SUN4V_ERR_ATTRS_PIO) {
+ force_sig_fault(SIGBUS, BUS_ADRERR,
+ (void __user *)sun4v_get_vaddr(regs));
+ return true;
+ }
+
+ /* Default to doing nothing */
+ return false;
+}
+
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
@@ -2063,6 +2215,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
+ if (!(regs->tstate & TSTATE_PRIV) &&
+ sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+ /* DON'T PANIC: This userspace error was handled. */
+ return;
+ }
+
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
@@ -2092,6 +2250,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_nonresum_oflow_cnt);
}
+static void sun4v_tlb_error(struct pt_regs *regs)
+{
+ die_if_kernel("TLB/TSB error", regs);
+}
+
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
@@ -2099,8 +2262,7 @@ unsigned long sun4v_err_itlb_error;
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
- if (tl > 1)
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -2113,7 +2275,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
sun4v_err_itlb_pte, sun4v_err_itlb_error);
- prom_halt();
+ sun4v_tlb_error(regs);
}
unsigned long sun4v_err_dtlb_vaddr;
@@ -2123,8 +2285,7 @@ unsigned long sun4v_err_dtlb_error;
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
- if (tl > 1)
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -2137,7 +2298,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
- prom_halt();
+ sun4v_tlb_error(regs);
}
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
@@ -2152,59 +2313,58 @@ void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
err, op);
}
-void do_fpe_common(struct pt_regs *regs)
+static void do_fpe_common(struct pt_regs *regs)
{
if (regs->tstate & TSTATE_PRIV) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
unsigned long fsr = current_thread_info()->xfsr[0];
- siginfo_t info;
+ int code;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- info.si_code = __SI_FAULT;
+ code = FPE_FLTUNK;
if ((fsr & 0x1c000) == (1 << 14)) {
if (fsr & 0x10)
- info.si_code = FPE_FLTINV;
+ code = FPE_FLTINV;
else if (fsr & 0x08)
- info.si_code = FPE_FLTOVF;
+ code = FPE_FLTOVF;
else if (fsr & 0x04)
- info.si_code = FPE_FLTUND;
+ code = FPE_FLTUND;
else if (fsr & 0x02)
- info.si_code = FPE_FLTDIV;
+ code = FPE_FLTDIV;
else if (fsr & 0x01)
- info.si_code = FPE_FLTRES;
+ code = FPE_FLTRES;
}
- force_sig_info(SIGFPE, &info, current);
+ force_sig_fault(SIGFPE, code, (void __user *)regs->tpc);
}
}
void do_fpieee(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
0, 0x24, SIGFPE) == NOTIFY_STOP)
- return;
+ goto out;
do_fpe_common(regs);
+out:
+ exception_exit(prev_state);
}
-extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
-
void do_fpother(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
struct fpustate *f = FPUSTATE;
int ret = 0;
if (notify_die(DIE_TRAP, "fpu exception other", regs,
0, 0x25, SIGFPE) == NOTIFY_STOP)
- return;
+ goto out;
switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
@@ -2213,17 +2373,19 @@ void do_fpother(struct pt_regs *regs)
break;
}
if (ret)
- return;
+ goto out;
do_fpe_common(regs);
+out:
+ exception_exit(prev_state);
}
void do_tof(struct pt_regs *regs)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
0, 0x26, SIGEMT) == NOTIFY_STOP)
- return;
+ goto out;
if (regs->tstate & TSTATE_PRIV)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
@@ -2231,21 +2393,18 @@ void do_tof(struct pt_regs *regs)
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGEMT;
- info.si_errno = 0;
- info.si_code = EMT_TAGOVF;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- force_sig_info(SIGEMT, &info, current);
+ force_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)regs->tpc);
+out:
+ exception_exit(prev_state);
}
void do_div0(struct pt_regs *regs)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "integer division by zero", regs,
0, 0x28, SIGFPE) == NOTIFY_STOP)
- return;
+ goto out;
if (regs->tstate & TSTATE_PRIV)
die_if_kernel("TL0: Kernel divide by zero.", regs);
@@ -2253,12 +2412,9 @@ void do_div0(struct pt_regs *regs)
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = FPE_INTDIV;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- force_sig_info(SIGFPE, &info, current);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->tpc);
+out:
+ exception_exit(prev_state);
}
static void instruction_dump(unsigned int *pc)
@@ -2291,7 +2447,7 @@ static void user_instruction_dump(unsigned int __user *pc)
printk("\n");
}
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long fp, ksp;
struct thread_info *tp;
@@ -2315,7 +2471,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = ksp + STACK_BIAS;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@@ -2336,13 +2492,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ print_ip_sym(loglvl, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = tsk->curr_ret_stack;
- if (tsk->ret_stack && index >= graph) {
- pc = tsk->ret_stack[index - graph].ret;
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
+ print_ip_sym(loglvl, pc);
graph++;
}
}
@@ -2360,7 +2517,7 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
return (struct reg_window *) (fp + STACK_BIAS);
}
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
int count = 0;
@@ -2401,28 +2558,25 @@ void die_if_kernel(char *str, struct pt_regs *regs)
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
- if (regs->tstate & TSTATE_PRIV)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ if (panic_on_oops)
+ panic("Fatal exception");
+ make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
-extern int handle_popc(u32 insn, struct pt_regs *regs);
-extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
-
void do_illegal_instruction(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
- siginfo_t info;
if (notify_die(DIE_TRAP, "illegal instruction", regs,
0, 0x10, SIGILL) == NOTIFY_STOP)
- return;
+ goto out;
if (tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
@@ -2431,14 +2585,14 @@ void do_illegal_instruction(struct pt_regs *regs)
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
if (handle_popc(insn, regs))
- return;
+ goto out;
} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
if (handle_ldf_stq(insn, regs))
- return;
+ goto out;
} else if (tlb_type == hypervisor) {
if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
if (!vis_emul(regs, insn))
- return;
+ goto out;
} else {
struct fpustate *f = FPUSTATE;
@@ -2448,44 +2602,37 @@ void do_illegal_instruction(struct pt_regs *regs)
* Trap in the %fsr to unimplemented_FPop.
*/
if (do_mathemu(regs, f, true))
- return;
+ goto out;
}
}
}
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)pc;
- info.si_trapno = 0;
- force_sig_info(SIGILL, &info, current);
+ force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc);
+out:
+ exception_exit(prev_state);
}
-extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
-
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "memory address unaligned", regs,
0, 0x34, SIGSEGV) == NOTIFY_STOP)
- return;
+ goto out;
if (regs->tstate & TSTATE_PRIV) {
kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
- return;
+ goto out;
}
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *)sfar;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ if (is_no_fault_exception(regs))
+ return;
+
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar);
+out:
+ exception_exit(prev_state);
}
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
- siginfo_t info;
-
if (notify_die(DIE_TRAP, "memory address unaligned", regs,
0, 0x34, SIGSEGV) == NOTIFY_STOP)
return;
@@ -2494,32 +2641,74 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c
kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
return;
}
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *) addr;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ if (is_no_fault_exception(regs))
+ return;
+
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr);
+}
+
+/* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI
+ * tag mismatch.
+ *
+ * ADI version tag mismatch on a load from memory always results in a
+ * precise exception. Tag mismatch on a store to memory will result in
+ * precise exception if MCDPER or PMCDPER is set to 1.
+ */
+void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr,
+ unsigned long context)
+{
+ if (notify_die(DIE_TRAP, "memory corruption precise exception", regs,
+ 0, 0x8, SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (regs->tstate & TSTATE_PRIV) {
+ /* MCD exception could happen because the task was running
+ * a system call with MCD enabled and passed a non-versioned
+ * pointer or pointer with bad version tag to the system
+ * call.
+ */
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ /* Looks like a bad syscall parameter */
+#ifdef DEBUG_EXCEPTIONS
+ pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
+ regs->tpc);
+ pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+ regs->tpc, entry->fixup);
+#endif
+ regs->tpc = entry->fixup;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+ pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n",
+ __func__, addr, context);
+ die_if_kernel("MCD precise", regs);
+ }
+
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr);
}
void do_privop(struct pt_regs *regs)
{
- siginfo_t info;
+ enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "privileged operation", regs,
0, 0x11, SIGILL) == NOTIFY_STOP)
- return;
+ goto out;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_PRVOPC;
- info.si_addr = (void __user *)regs->tpc;
- info.si_trapno = 0;
- force_sig_info(SIGILL, &info, current);
+ force_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)regs->tpc);
+out:
+ exception_exit(prev_state);
}
void do_privact(struct pt_regs *regs)
@@ -2530,99 +2719,88 @@ void do_privact(struct pt_regs *regs)
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
+ exception_enter();
die_if_kernel("TL0: Cache Error Exception", regs);
}
-void do_cee_tl1(struct pt_regs *regs)
-{
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
void do_div0_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: DIV0 Exception", regs);
}
-void do_fpdis_tl1(struct pt_regs *regs)
-{
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: FPU Disabled", regs);
-}
-
void do_fpieee_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: FPU IEEE Exception", regs);
}
void do_fpother_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: FPU Other Exception", regs);
}
void do_ill_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
void do_irq_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: IRQ Exception", regs);
}
void do_lddfmna_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: LDDF Exception", regs);
}
void do_stdfmna_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: STDF Exception", regs);
}
void do_paw(struct pt_regs *regs)
{
+ exception_enter();
die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
void do_paw_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
void do_vaw(struct pt_regs *regs)
{
+ exception_enter();
die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
void do_vaw_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
void do_tof_tl1(struct pt_regs *regs)
{
+ exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Tag Overflow Exception", regs);
}
@@ -2638,6 +2816,7 @@ void do_getpsr(struct pt_regs *regs)
}
}
+u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
@@ -2670,8 +2849,6 @@ void __init trap_init(void)
fault_address) ||
TI_KREGS != offsetof(struct thread_info, kregs) ||
TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info,
- exec_domain) ||
TI_REG_WINDOW != offsetof(struct thread_info,
reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info,
@@ -2681,10 +2858,6 @@ void __init trap_init(void)
TI_PRE_COUNT != offsetof(struct thread_info,
preempt_count) ||
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
- TI_CURRENT_DS != offsetof(struct thread_info,
- current_ds) ||
- TI_RESTART_BLOCK != offsetof(struct thread_info,
- restart_block) ||
TI_KUNA_REGS != offsetof(struct thread_info,
kern_una_regs) ||
TI_KUNA_INSN != offsetof(struct thread_info,
@@ -2747,6 +2920,6 @@ void __init trap_init(void)
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
- atomic_inc(&init_mm.mm_count);
+ mmgrab(&init_mm);
current->active_mm = &init_mm;
}
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index a313e4a9399b..eaed39ce8938 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* tsb.S: Sparc64 TSB table handling.
*
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
@@ -29,13 +30,17 @@
*/
tsb_miss_dtlb:
mov TLB_TAG_ACCESS, %g4
+ ldxa [%g4] ASI_DMMU, %g4
+ srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- ldxa [%g4] ASI_DMMU, %g4
+ sllx %g4, PAGE_SHIFT, %g4
tsb_miss_itlb:
mov TLB_TAG_ACCESS, %g4
+ ldxa [%g4] ASI_IMMU, %g4
+ srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- ldxa [%g4] ASI_IMMU, %g4
+ sllx %g4, PAGE_SHIFT, %g4
/* At this point we have:
* %g1 -- PAGE_SIZE TSB entry address
@@ -75,7 +80,7 @@ tsb_miss_page_table_walk:
mov 512, %g7
andn %g5, 0x7, %g5
sllx %g7, %g6, %g7
- srlx %g4, HPAGE_SHIFT, %g6
+ srlx %g4, REAL_HPAGE_SHIFT, %g6
sub %g7, 1, %g7
and %g6, %g7, %g6
sllx %g6, 4, %g6
@@ -113,26 +118,11 @@ tsb_miss_page_table_walk_sun4v_fastpath:
/* Valid PTE is now in %g5. */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-661: sethi %uhi(_PAGE_SZALL_4U), %g7
+ sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
sllx %g7, 32, %g7
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- mov _PAGE_SZALL_4V, %g7
- nop
- .previous
-
- and %g5, %g7, %g2
-
-661: sethi %uhi(_PAGE_SZHUGE_4U), %g7
- sllx %g7, 32, %g7
- .section .sun4v_2insn_patch, "ax"
- .word 661b
- mov _PAGE_SZHUGE_4V, %g7
- nop
- .previous
- cmp %g2, %g7
- bne,pt %xcc, 60f
+ andcc %g5, %g7, %g0
+ be,pt %xcc, 60f
nop
/* It is a huge page, use huge page TSB entry address we
@@ -162,10 +152,10 @@ tsb_miss_page_table_walk_sun4v_fastpath:
nop
.previous
- rdpr %tl, %g3
- cmp %g3, 1
+ rdpr %tl, %g7
+ cmp %g7, 1
bne,pn %xcc, winfix_trampoline
- nop
+ mov %g3, %g4
ba,pt %xcc, etrap
rd %pc, %g7
call hugetlb_setup
@@ -284,6 +274,10 @@ tsb_do_dtlb_fault:
nop
.previous
+ /* Clear context ID bits. */
+ srlx %g5, PAGE_SHIFT, %g5
+ sllx %g5, PAGE_SHIFT, %g5
+
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
@@ -367,6 +361,7 @@ tsb_flush:
* %o1: TSB base config pointer
* %o2: TSB huge config pointer, or NULL if none
* %o3: Hypervisor TSB descriptor physical address
+ * %o4: Secondary context to load, if non-zero
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
@@ -379,6 +374,17 @@ __tsb_context_switch:
rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
+ brz,pn %o4, 1f
+ mov SECONDARY_CONTEXT, %o5
+
+661: stxa %o4, [%o5] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %o4, [%o5] ASI_MMU
+ .previous
+ flush %g6
+
+1:
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
@@ -462,13 +468,16 @@ __tsb_context_switch:
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -493,9 +502,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -503,7 +512,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
diff --git a/arch/sparc/kernel/ttable_32.S b/arch/sparc/kernel/ttable_32.S
index 8a7a96ca676f..e79fd786fbbb 100644
--- a/arch/sparc/kernel/ttable_32.S
+++ b/arch/sparc/kernel/ttable_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* The Sparc trap table, bootloader gives us control at _start. */
__HEAD
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa29e20..86e737e59c7e 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions.
*
* Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net)
@@ -25,8 +26,10 @@ tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction)
tl0_privop: TRAP(do_privop)
tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
-tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
-tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
+tl0_resv018: BTRAP(0x18) BTRAP(0x19)
+tl0_mcd: SUN4V_MCD_PRECISE
+tl0_resv01b: BTRAP(0x1b)
+tl0_resv01c: BTRAP(0x1c) BTRAP(0x1d) BTRAP(0x1e) BTRAP(0x1f)
tl0_fpdis: TRAP_NOSAVE(do_fpdis)
tl0_fpieee: TRAP_SAVEFPU(do_fpieee)
tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos)
@@ -50,7 +53,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
@@ -165,7 +168,7 @@ tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172)
-tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
+tl0_resv173: UPROBES_TRAP(0x173) UPROBES_TRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8f096e84a937..f8bf839289fb 100644
--- a/arch/sparc/kernel/una_asm_32.S
+++ b/arch/sparc/kernel/una_asm_32.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* una_asm.S: Kernel unaligned trap assembler helpers.
*
* Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net)
diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
index 1c8d33228b2a..e256f395e9f6 100644
--- a/arch/sparc/kernel/una_asm_64.S
+++ b/arch/sparc/kernel/una_asm_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* una_asm.S: Kernel unaligned trap assembler helpers.
*
* Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net)
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index c0ec89786193..455f0258c745 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
@@ -8,13 +9,18 @@
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/extable.h>
+
+#include <asm/setup.h>
+
+#include "kernel.h"
enum direction {
load, /* ld, ldd, ldh, ldsh */
@@ -162,7 +168,7 @@ unsigned long safe_compute_effective_address(struct pt_regs *regs,
/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
- panic(str);
+ panic("%s", str);
}
/* una_asm.S */
@@ -208,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
- unsigned long g2 = regs->u_regs [UREG_G2];
- unsigned long fixup = search_extables_range(regs->pc, &g2);
+ const struct exception_table_entry *entry;
- if (!fixup) {
+ entry = search_exception_tables(regs->pc);
+ if (!entry) {
unsigned long address = compute_effective_address(regs, insn);
if(address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -227,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
die_if_kernel("Oops", regs);
/* Not reached */
}
- regs->pc = fixup;
+ regs->pc = entry->fixup;
regs->npc = regs->pc + 4;
- regs->u_regs [UREG_G2] = g2;
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
@@ -269,109 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
}
-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
- enum direction dir)
-{
- unsigned int reg;
- int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
- int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-
- if ((regs->pc | regs->npc) & 3)
- return 0;
-
- /* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
- ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-
- reg = (insn >> 25) & 0x1f;
- if (reg >= 16) {
- if (!access_ok(check, WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- reg = (insn >> 14) & 0x1f;
- if (reg >= 16) {
- if (!access_ok(check, WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- if (!(insn & 0x2000)) {
- reg = (insn & 0x1f);
- if (reg >= 16) {
- if (!access_ok(check, WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- }
-#undef WINREG_ADDR
- return 0;
-}
-
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
-{
- siginfo_t info;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
- info.si_trapno = 0;
- send_sig_info(SIGBUS, &info, current);
-}
-
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
- enum direction dir;
-
- if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
- (((insn >> 30) & 3) != 3))
- goto kill_user;
- dir = decode_direction(insn);
- if(!ok_for_user(regs, insn, dir)) {
- goto kill_user;
- } else {
- int err, size = decode_access_size(insn);
- unsigned long addr;
-
- if(floating_point_load_or_store_p(insn)) {
- printk("User FPU load/store unaligned unsupported.\n");
- goto kill_user;
- }
-
- addr = compute_effective_address(regs, insn);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
- switch(dir) {
- case load:
- err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
- regs),
- size, (unsigned long *) addr,
- decode_signedness(insn));
- break;
-
- case store:
- err = do_int_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs);
- break;
-
- case both:
- /*
- * This was supported in 2.4. However, we question
- * the value of SWAP instruction across word boundaries.
- */
- printk("Unaligned SWAP unsupported.\n");
- err = -EFAULT;
- break;
-
- default:
- unaligned_panic("Impossible user unaligned trap.");
- goto out;
- }
- if (err)
- goto kill_user;
- else
- advance(regs);
- goto out;
- }
-
-kill_user:
- user_mna_trap_fault(regs, insn);
-out:
- ;
+ send_sig_fault(SIGBUS, BUS_ADRALN,
+ (void __user *)safe_compute_effective_address(regs, insn),
+ current);
}
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 8201c25e7669..23db2efda570 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
@@ -11,18 +12,23 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
+#include <asm/setup.h>
+
+#include "entry.h"
+#include "kernel.h"
enum direction {
load, /* ld, ldd, ldh, ldsh */
@@ -163,17 +169,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
+ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ unsigned long addr;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
+
+ if (!from_kernel && test_thread_flag(TIF_32BIT))
+ addr &= 0xffffffff;
+
+ return addr;
}
/* This is just to make gcc think die_if_kernel does return... */
@@ -198,8 +210,8 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
if (size == 16) {
size = 8;
zero = (((long)(reg_num ?
- (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
- (unsigned)fetch_reg(reg_num + 1, regs);
+ (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
+ (unsigned int)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
src_val_p = fetch_reg_addr(reg_num, regs);
}
@@ -418,9 +430,6 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
@@ -428,24 +437,26 @@ extern void sun4v_data_access_exception(struct pt_regs *regs,
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
- int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ int freg;
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
- int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ int flag;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
- if (freg & 3) {
- current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
- do_fpother(regs);
- return 0;
- }
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
+ freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ if (freg & 3) {
+ current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+ do_fpother(regs);
+ return 0;
+ }
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
@@ -505,6 +516,12 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
case 0x100000: size = 4; break;
default: size = 2; break;
}
+ if (size == 1)
+ freg = (insn >> 25) & 0x1f;
+ else
+ freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+ flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+
for (i = 0; i < size; i++)
data[i] = 0;
@@ -578,6 +595,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
+ enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
@@ -632,13 +650,16 @@ daex:
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
- return;
+ goto out;
}
advance(regs);
+out:
+ exception_exit(prev_state);
}
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
+ enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
@@ -680,7 +701,9 @@ daex:
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
- return;
+ goto out;
}
advance(regs);
+out:
+ exception_exit(prev_state);
}
diff --git a/arch/sparc/kernel/uprobes.c b/arch/sparc/kernel/uprobes.c
new file mode 100644
index 000000000000..305017bec164
--- /dev/null
+++ b/arch/sparc/kernel/uprobes.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * User-space Probes (UProbes) for sparc
+ *
+ * Copyright (C) 2013 Oracle Inc.
+ *
+ * Authors:
+ * Jose E. Marchesi <jose.marchesi@oracle.com>
+ * Eric Saint Etienne <eric.saint.etienne@oracle.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/uprobes.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h> /* For struct task_struct */
+#include <linux/kdebug.h>
+
+#include <asm/cacheflush.h>
+
+#include "kernel.h"
+
+/* Compute the address of the breakpoint instruction and return it.
+ *
+ * Note that uprobe_get_swbp_addr is defined as a weak symbol in
+ * kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+static void copy_to_page(struct page *page, unsigned long vaddr,
+ const void *src, int len)
+{
+ void *kaddr = kmap_atomic(page);
+
+ memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+ kunmap_atomic(kaddr);
+}
+
+/* Fill in the xol area with the probed instruction followed by the
+ * single-step trap. Some fixups in the copied instruction are
+ * performed at this point.
+ *
+ * Note that arch_uprobe_copy_ixol is defined as a weak symbol in
+ * kernel/events/uprobes.c.
+ */
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ const u32 stp_insn = UPROBE_STP_INSN;
+ u32 insn = *(u32 *) src;
+
+ /* Branches that annul their delay slot must be fixed not to do
+ * so.  By clearing the annul bit on these instructions we make
+ * sure the single-step breakpoint in the XOL slot will be
+ * executed.
+ */
+
+ u32 op = (insn >> 30) & 0x3;
+ u32 op2 = (insn >> 22) & 0x7;
+
+ if (op == 0 &&
+ (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) &&
+ (insn & ANNUL_BIT) == ANNUL_BIT)
+ insn &= ~ANNUL_BIT;
+
+ copy_to_page(page, vaddr, &insn, len);
+ copy_to_page(page, vaddr+len, &stp_insn, 4);
+}
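The annul-bit fixup above is pure bit manipulation on the SPARC format-2 branch layout and can be exercised stand-alone. The sketch below mirrors that logic under the assumption that the kernel's ANNUL_BIT is bit 29 (the "a" field of format-2 instructions); the sample encoding is illustrative and not taken from this patch.

/* Stand-alone illustration of the annul-bit fixup performed above. */
#include <stdio.h>
#include <stdint.h>

#define SAMPLE_ANNUL_BIT (1u << 29)     /* assumed value of ANNUL_BIT */

static uint32_t strip_annul(uint32_t insn)
{
        uint32_t op  = (insn >> 30) & 0x3;
        uint32_t op2 = (insn >> 22) & 0x7;

        /* Same test as arch_uprobe_copy_ixol(): format-2 branches only. */
        if (op == 0 &&
            (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) &&
            (insn & SAMPLE_ANNUL_BIT))
                insn &= ~SAMPLE_ANNUL_BIT;
        return insn;
}

int main(void)
{
        uint32_t ba_a = 0x30800005;     /* "ba,a .+20" (disp22 = 5), illustrative */

        printf("%08x -> %08x\n", ba_a, strip_annul(ba_a));
        return 0;
}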
+
+
+/* Instruction analysis/validity.
+ *
+ * This function returns 0 on success or a -ve number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
+ struct mm_struct *mm, unsigned long addr)
+{
+ /* Any unsupported instruction? Then return -EINVAL */
+ return 0;
+}
+
+/* If INSN is a relative control transfer instruction, return the
+ * corrected branch destination value.
+ *
+ * Note that regs->tpc and regs->tnpc still hold the values of the
+ * program counters at the time of the single-step trap due to the
+ * execution of the UPROBE_STP_INSN at utask->xol_vaddr + 4.
+ *
+ */
+static unsigned long relbranch_fixup(u32 insn, struct uprobe_task *utask,
+ struct pt_regs *regs)
+{
+ /* Branch not taken, no mods necessary. */
+ if (regs->tnpc == regs->tpc + 0x4UL)
+ return utask->autask.saved_tnpc + 0x4UL;
+
+ /* The three cases are call, branch w/prediction,
+ * and traditional branch.
+ */
+ if ((insn & 0xc0000000) == 0x40000000 ||
+ (insn & 0xc1c00000) == 0x00400000 ||
+ (insn & 0xc1c00000) == 0x00800000) {
+ unsigned long real_pc = (unsigned long) utask->vaddr;
+ unsigned long ixol_addr = utask->xol_vaddr;
+
+ /* The instruction did all the work for us
+ * already, just apply the offset to the correct
+ * instruction location.
+ */
+ return (real_pc + (regs->tnpc - ixol_addr));
+ }
+
+ /* It is jmpl or some other absolute PC modification instruction,
+ * leave NPC as-is.
+ */
+ return regs->tnpc;
+}
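A worked example of the re-basing performed by relbranch_fixup(), with made-up addresses: a taken branch single-stepped from the XOL slot leaves %tnpc relative to that slot, and adding back the original probe address recovers the destination the branch would have had in place.

#include <stdio.h>

int main(void)
{
        unsigned long real_pc   = 0x100000UL;   /* probed instruction address */
        unsigned long ixol_addr = 0x200000UL;   /* XOL slot address           */
        unsigned long tnpc      = 0x200040UL;   /* branch taken, +0x40        */

        printf("fixed tnpc = %#lx\n", real_pc + (tnpc - ixol_addr));
        return 0;
}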
+
+/* If INSN is an instruction which writes its PC location
+ * into a destination register, fix that up.
+ */
+static int retpc_fixup(struct pt_regs *regs, u32 insn,
+ unsigned long real_pc)
+{
+ unsigned long *slot = NULL;
+ int rc = 0;
+
+ /* Simplest case is 'call', which always uses %o7 */
+ if ((insn & 0xc0000000) == 0x40000000)
+ slot = &regs->u_regs[UREG_I7];
+
+ /* 'jmpl' encodes the register inside of the opcode */
+ if ((insn & 0xc1f80000) == 0x81c00000) {
+ unsigned long rd = ((insn >> 25) & 0x1f);
+
+ if (rd <= 15) {
+ slot = &regs->u_regs[rd];
+ } else {
+ unsigned long fp = regs->u_regs[UREG_FP];
+ /* Hard case, it goes onto the stack. */
+ flushw_all();
+
+ rd -= 16;
+ if (test_thread_64bit_stack(fp)) {
+ unsigned long __user *uslot =
+ (unsigned long __user *) (fp + STACK_BIAS) + rd;
+ rc = __put_user(real_pc, uslot);
+ } else {
+ unsigned int __user *uslot = (unsigned int
+ __user *) fp + rd;
+ rc = __put_user((u32) real_pc, uslot);
+ }
+ }
+ }
+ if (slot != NULL)
+ *slot = real_pc;
+ return rc;
+}
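The two opcode tests above can be checked in isolation. The sketch below uses encodings I believe to be standard SPARC ("call" and "retl", i.e. jmpl %o7 + 8, %g0); they are illustrative and not taken from this patch.

#include <assert.h>
#include <stdint.h>

/* "call" has op = 01 in the top two bits; "jmpl" is a format-3 op with
 * op3 = 0x38, matched by masking 0xc1f80000 against 0x81c00000.
 */
static int is_call(uint32_t insn) { return (insn & 0xc0000000u) == 0x40000000u; }
static int is_jmpl(uint32_t insn) { return (insn & 0xc1f80000u) == 0x81c00000u; }

int main(void)
{
        uint32_t call_insn = 0x40000004u;   /* call .+16                 */
        uint32_t retl_insn = 0x81c3e008u;   /* jmpl %o7 + 8, %g0 (retl)  */

        assert(is_call(call_insn) && !is_jmpl(call_insn));
        assert(is_jmpl(retl_insn) && !is_call(retl_insn));
        return 0;
}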
+
+/* Single-stepping can be avoided for certain instructions: NOPs and
+ * instructions that can be emulated. This function determines
+ * whether the instruction where the uprobe is installed falls in one
+ * of these cases and emulates it.
+ *
+ * This function returns true if the single-stepping can be skipped,
+ * false otherwise.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ /* We currently only emulate NOP instructions.
+ */
+
+ if (auprobe->ixol == (1 << 24)) {
+ regs->tnpc += 4;
+ regs->tpc += 4;
+ return true;
+ }
+
+ return false;
+}
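The single emulated case compares auprobe->ixol against 1 << 24, which is the canonical SPARC NOP encoding, "sethi 0, %g0" (op = 00, rd = %g0, op2 = 100, imm22 = 0). A trivial stand-alone check of that identity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t sparc_nop = 1u << 24;

        assert(sparc_nop == 0x01000000u);
        assert(((sparc_nop >> 22) & 0x7) == 0x4);   /* op2 = SETHI */
        assert(((sparc_nop >> 25) & 0x1f) == 0);    /* rd  = %g0   */
        return 0;
}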
+
+/* Prepare to execute out of line. At this point
+ * current->utask->xol_vaddr points to an allocated XOL slot properly
+ * initialized with the original instruction and the single-stepping
+ * trap instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ struct arch_uprobe_task *autask = &current->utask->autask;
+
+ /* Save the current program counters so they can be restored
+ * later.
+ */
+ autask->saved_tpc = regs->tpc;
+ autask->saved_tnpc = regs->tnpc;
+
+ /* Adjust PC and NPC so the first instruction in the XOL slot
+ * will be executed by the user task.
+ */
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ return 0;
+}
+
+/* Prepare to resume execution after the single-step. Called after
+ * single-stepping. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ struct arch_uprobe_task *autask = &utask->autask;
+ u32 insn = auprobe->ixol;
+ int rc = 0;
+
+ if (utask->state == UTASK_SSTEP_ACK) {
+ regs->tnpc = relbranch_fixup(insn, utask, regs);
+ regs->tpc = autask->saved_tnpc;
+ rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
+ } else {
+ regs->tnpc = utask->vaddr+4;
+ regs->tpc = autask->saved_tnpc+4;
+ }
+ return rc;
+}
+
+/* Handler for uprobe traps. This is called from the traps table and
+ * triggers the proper die notification.
+ */
+asmlinkage void uprobe_trap(struct pt_regs *regs,
+ unsigned long trap_level)
+{
+ BUG_ON(trap_level != 0x173 && trap_level != 0x174);
+
+ /* We are only interested in user-mode code. Uprobe traps
+ * shall not be present in kernel code.
+ */
+ if (!user_mode(regs)) {
+ local_irq_enable();
+ bad_trap(regs, trap_level);
+ return;
+ }
+
+ /* trap_level == 0x173 --> ta 0x73
+ * trap_level == 0x174 --> ta 0x74
+ */
+ if (notify_die((trap_level == 0x173) ? DIE_BPT : DIE_SSTEP,
+ (trap_level == 0x173) ? "bpt" : "sstep",
+ regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
+ bad_trap(regs, trap_level);
+}
+
+/* Callback routine for handling die notifications.
+*/
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ int ret = NOTIFY_DONE;
+ struct die_args *args = (struct die_args *)data;
+
+ /* We are only interested in userspace traps */
+ if (args->regs && !user_mode(args->regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BPT:
+ if (uprobe_pre_sstep_notifier(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_SSTEP:
+ if (uprobe_post_sstep_notifier(args->regs))
+ ret = NOTIFY_STOP;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* This function gets called when a XOL instruction either gets
+ * trapped or the thread has a fatal signal, so reset the instruction
+ * pointer to its probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+/* If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a single-stepped
+ * instruction jumps back to its own address.
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ return false;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr = regs->u_regs[UREG_I7];
+
+ regs->u_regs[UREG_I7] = trampoline_vaddr-8;
+
+ return orig_ret_vaddr + 8;
+}
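For illustration (values made up): the SPARC call instruction leaves its own address in %o7, so the real return target is that value + 8 (the call plus its delay slot), and planting trampoline_vaddr - 8 in the register means the eventual return (a jmpl with offset 8) lands exactly on the trampoline.

#include <stdio.h>

int main(void)
{
        unsigned long o7         = 0x40001000UL;  /* address of the call  */
        unsigned long trampoline = 0x7f000000UL;  /* uretprobe trampoline */

        unsigned long orig_ret = o7 + 8;          /* what the caller expected */
        unsigned long new_o7   = trampoline - 8;  /* value written back       */

        printf("orig return %#lx, return register rewritten to %#lx\n",
               orig_ret, new_o7);
        return 0;
}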
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644
index 000000000000..e4cee7be5cd0
--- /dev/null
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+ .text
+ .align 8
+ .globl user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+ rdpr %cwp, %g1
+ add %g1, 1, %g1
+ wrpr %g1, 0x0, %cwp
+
+ rdpr %wstate, %g2
+ sll %g2, 3, %g2
+ wrpr %g2, 0x0, %wstate
+
+ /* We know %canrestore and %otherwin are both zero. */
+
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
+ mov PRIMARY_CONTEXT, %g1
+
+661: stxa %g2, [%g1] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %g2, [%g1] ASI_MMU
+ .previous
+
+ sethi %hi(KERNBASE), %g1
+ flush %g1
+
+ mov %g4, %l4
+ mov %g5, %l5
+ brnz,pn %g3, 1f
+ mov %g3, %l3
+
+ or %g4, FAULT_CODE_WINFIXUP, %g4
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+1:
+ mov %g6, %l1
+ wrpr %g0, 0x0, %tl
+
+661: nop
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ SET_GL(0)
+ .previous
+
+661: wrpr %g0, RTRAP_PSTATE, %pstate
+ .section .sun_m7_1insn_patch, "ax"
+ .word 661b
+ /* Re-enable PSTATE.mcde to maintain ADI security */
+ wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+ .previous
+
+ mov %l1, %g6
+ ldx [%g6 + TI_TASK], %g4
+ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+ brnz,pn %l3, 1f
+ nop
+
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ nop
+
+1: cmp %g3, 2
+ bne,pn %xcc, 2f
+ nop
+
+ sethi %hi(tlb_type), %g1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ mov %l4, %o2
+ call sun4v_do_mna
+ mov %l5, %o1
+ ba,a,pt %xcc, rtrap
+1: mov %l4, %o1
+ mov %l5, %o2
+ call mem_address_unaligned
+ nop
+ ba,a,pt %xcc, rtrap
+
+2: sethi %hi(tlb_type), %g1
+ mov %l4, %o1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ mov %l5, %o2
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ call sun4v_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
+ nop
+
+1: call spitfire_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
index b7f0f3f3a909..7a2d9a9bea59 100644
--- a/arch/sparc/kernel/utrap.S
+++ b/arch/sparc/kernel/utrap.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
.globl utrap_trap
.type utrap_trap,#function
utrap_trap: /* %g3=handler,%g4=level */
@@ -11,8 +12,7 @@ utrap_trap: /* %g3=handler,%g4=level */
mov %l4, %o1
call bad_trap
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
invoke_utrap:
sllx %g3, 3, %g3
diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c
new file mode 100644
index 000000000000..0e27437eb97b
--- /dev/null
+++ b/arch/sparc/kernel/vdso.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ * Thanks to hpa@transmeta.com for some useful hint.
+ * Special thanks to Ingo Molnar for his early experience with
+ * a different vsyscall implementation for Linux/IA32 and for the name.
+ */
+
+#include <linux/time.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/vvar.h>
+
+void update_vsyscall_tz(void)
+{
+ if (unlikely(vvar_data == NULL))
+ return;
+
+ vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vvar_data->tz_dsttime = sys_tz.tz_dsttime;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+ struct vvar_data *vdata = vvar_data;
+
+ if (unlikely(vdata == NULL))
+ return;
+
+ vvar_write_begin(vdata);
+ vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+ vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+ vdata->clock.mask = tk->tkr_mono.mask;
+ vdata->clock.mult = tk->tkr_mono.mult;
+ vdata->clock.shift = tk->tkr_mono.shift;
+
+ vdata->wall_time_sec = tk->xtime_sec;
+ vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
+
+ vdata->monotonic_time_sec = tk->xtime_sec +
+ tk->wall_to_monotonic.tv_sec;
+ vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
+ (tk->wall_to_monotonic.tv_nsec <<
+ tk->tkr_mono.shift);
+
+ while (vdata->monotonic_time_snsec >=
+ (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+ vdata->monotonic_time_snsec -=
+ ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+ vdata->monotonic_time_sec++;
+ }
+
+ vdata->wall_time_coarse_sec = tk->xtime_sec;
+ vdata->wall_time_coarse_nsec =
+ (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
+
+ vdata->monotonic_time_coarse_sec =
+ vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+ vdata->monotonic_time_coarse_nsec =
+ vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+ while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+ vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+ vdata->monotonic_time_coarse_sec++;
+ }
+
+ vvar_write_end(vdata);
+}
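The normalisation loops above operate on nanoseconds pre-shifted by tkr_mono.shift, so one second in that unit is NSEC_PER_SEC << shift. A stand-alone sketch of the same arithmetic with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        unsigned int shift = 8;                  /* example clock shift */
        uint64_t sec   = 1000;
        uint64_t snsec = (NSEC_PER_SEC + 250000000ULL) << shift;

        while (snsec >= (NSEC_PER_SEC << shift)) {
                snsec -= NSEC_PER_SEC << shift;
                sec++;
        }
        printf("sec=%llu ns=%llu\n",
               (unsigned long long)sec,
               (unsigned long long)(snsec >> shift));
        return 0;
}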
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 8647fcc5ca6c..1a1a9d6b8f2e 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* vio.c: Virtual I/O channel devices probing infrastructure.
*
* Copyright (c) 2003-2005 IBM Corp.
@@ -45,10 +46,18 @@ static const struct vio_device_id *vio_match_device(
return NULL;
}
-static int vio_bus_match(struct device *dev, struct device_driver *drv)
+static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+ add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+ return 0;
+}
+
+static int vio_bus_match(struct device *dev, const struct device_driver *drv)
{
struct vio_dev *vio_dev = to_vio_dev(dev);
- struct vio_driver *vio_drv = to_vio_driver(drv);
+ const struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *matches = vio_drv->id_table;
if (!matches)
@@ -62,26 +71,42 @@ static int vio_device_probe(struct device *dev)
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
- int error = -ENODEV;
- if (drv->probe) {
- id = vio_match_device(drv->id_table, vdev);
- if (id)
- error = drv->probe(vdev, id);
+ if (!drv->probe)
+ return -ENODEV;
+
+ id = vio_match_device(drv->id_table, vdev);
+ if (!id)
+ return -ENODEV;
+
+ /* alloc irqs (unless the driver specified not to) */
+ if (!drv->no_irq) {
+ if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL)
+ vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->tx_ino);
+
+ if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL)
+ vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->rx_ino);
}
- return error;
+ return drv->probe(vdev, id);
}
-static int vio_device_remove(struct device *dev)
+static void vio_device_remove(struct device *dev)
{
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
- if (drv->remove)
- return drv->remove(vdev);
+ if (drv->remove) {
+ /*
+ * Ideally, we would remove/deallocate tx/rx virqs
+ * here - however, there are currently no support
+ * routines to do so. TBD
+ */
- return 1;
+ drv->remove(vdev);
+ }
}
static ssize_t devspec_show(struct device *dev,
@@ -97,6 +122,7 @@ static ssize_t devspec_show(struct device *dev,
return sprintf(buf, "%s\n", str);
}
+static DEVICE_ATTR_RO(devspec);
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -104,16 +130,29 @@ static ssize_t type_show(struct device *dev,
struct vio_dev *vdev = to_vio_dev(dev);
return sprintf(buf, "%s\n", vdev->type);
}
+static DEVICE_ATTR_RO(type);
-static struct device_attribute vio_dev_attrs[] = {
- __ATTR_RO(devspec),
- __ATTR_RO(type),
- __ATTR_NULL
-};
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct vio_dev *vdev = to_vio_dev(dev);
+
+ return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+static DEVICE_ATTR_RO(modalias);
-static struct bus_type vio_bus_type = {
+static struct attribute *vio_dev_attrs[] = {
+ &dev_attr_devspec.attr,
+ &dev_attr_type.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+ };
+ATTRIBUTE_GROUPS(vio_dev);
+
+static const struct bus_type vio_bus_type = {
.name = "vio",
- .dev_attrs = vio_dev_attrs,
+ .dev_groups = vio_dev_groups,
+ .uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_device_probe,
.remove = vio_device_remove,
@@ -152,7 +191,7 @@ show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
vdev = to_vio_dev(dev);
dp = vdev->dp;
- return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
@@ -163,11 +202,59 @@ static struct device_node *cdev_node;
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;
+static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
+{
+ const u64 *cfg_handle = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ return cfg_handle;
+}
+
+/**
+ * vio_vdev_node() - Find VDEV node in MD
+ * @hp: Handle to the MD
+ * @vdev: Pointer to VDEV
+ *
+ * Find the node in the current MD which matches the given vio_dev. This
+ * must be done dynamically since the node value can change if the MD
+ * is updated.
+ *
+ * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine
+ *
+ * Return: The VDEV node in MDESC
+ */
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)
+{
+ u64 node;
+
+ if (vdev == NULL)
+ return MDESC_NODE_NULL;
+
+ node = mdesc_get_node(hp, (const char *)vdev->node_name,
+ &vdev->md_node_info);
+
+ return node;
+}
+EXPORT_SYMBOL(vio_vdev_node);
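A hypothetical kernel-context sketch (not part of this patch) of the locking rule stated in the kernel-doc above: hold the MD via mdesc_grab() across the lookup so the returned node value stays meaningful. example_lookup() is an invented helper name; it assumes <asm/mdesc.h> and <asm/vio.h>.

static u64 example_lookup(struct vio_dev *vdev)
{
        struct mdesc_handle *hp = mdesc_grab();
        u64 node = MDESC_NODE_NULL;

        if (hp) {
                node = vio_vdev_node(hp, vdev);
                mdesc_release(hp);
        }
        return node;
}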
+
static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
struct vio_dev *vdev)
{
u64 a;
+ vdev->tx_ino = ~0UL;
+ vdev->rx_ino = ~0UL;
+ vdev->channel_id = ~0UL;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
const u64 *chan_id;
const u64 *irq;
@@ -177,27 +264,38 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
irq = mdesc_get_property(hp, target, "tx-ino", NULL);
if (irq)
- vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ vdev->tx_ino = *irq;
irq = mdesc_get_property(hp, target, "rx-ino", NULL);
if (irq)
- vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ vdev->rx_ino = *irq;
chan_id = mdesc_get_property(hp, target, "id", NULL);
if (chan_id)
vdev->channel_id = *chan_id;
}
+
+ vdev->cdev_handle = cdev_cfg_handle;
+}
+
+int vio_set_intr(unsigned long dev_ino, int state)
+{
+ int err;
+
+ err = sun4v_vintr_set_valid(cdev_cfg_handle, dev_ino, state);
+ return err;
}
+EXPORT_SYMBOL(vio_set_intr);
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ const char *node_name,
struct device *parent)
{
- const char *type, *compat, *bus_id_name;
+ const char *type, *compat;
struct device_node *dp;
struct vio_dev *vdev;
int err, tlen, clen;
const u64 *id, *cfg_handle;
- u64 a;
type = mdesc_get_property(hp, mp, "device-type", &tlen);
if (!type) {
@@ -207,7 +305,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
tlen = strlen(type) + 1;
}
}
- if (tlen > VIO_MAX_TYPE_LEN) {
+ if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) {
printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
type);
return NULL;
@@ -215,31 +313,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
id = mdesc_get_property(hp, mp, "id", NULL);
- cfg_handle = NULL;
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
- u64 target;
-
- target = mdesc_arc_target(hp, a);
- cfg_handle = mdesc_get_property(hp, target,
- "cfg-handle", NULL);
- if (cfg_handle)
- break;
- }
-
- bus_id_name = type;
- if (!strcmp(type, "domain-services-port"))
- bus_id_name = "ds";
-
- /*
- * 20 char is the old driver-core name size limit, which is no more.
- * This check can probably be removed after review and possible
- * adaption of the vio users name length handling.
- */
- if (strlen(bus_id_name) >= 20 - 4) {
- printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
- bus_id_name);
- return NULL;
- }
+ cfg_handle = vio_cfg_handle(hp, mp);
compat = mdesc_get_property(hp, mp, "device-type", &clen);
if (!compat) {
@@ -264,22 +338,23 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
memset(vdev->compat, 0, sizeof(vdev->compat));
vdev->compat_len = clen;
- vdev->channel_id = ~0UL;
- vdev->tx_irq = ~0;
- vdev->rx_irq = ~0;
+ vdev->port_id = ~0UL;
+ vdev->tx_irq = 0;
+ vdev->rx_irq = 0;
vio_fill_channel_info(hp, mp, vdev);
if (!id) {
- dev_set_name(&vdev->dev, "%s", bus_id_name);
+ dev_set_name(&vdev->dev, "%s", type);
vdev->dev_no = ~(u64)0;
} else if (!cfg_handle) {
- dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
+ dev_set_name(&vdev->dev, "%s-%llu", type, *id);
vdev->dev_no = *id;
} else {
- dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
+ dev_set_name(&vdev->dev, "%s-%llu-%llu", type,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->port_id = *id;
}
vdev->dev.parent = parent;
@@ -289,25 +364,41 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
if (parent == NULL) {
dp = cdev_node;
} else if (to_vio_dev(parent) == root_vdev) {
- dp = of_get_next_child(cdev_node, NULL);
- while (dp) {
- if (!strcmp(dp->type, type))
+ for_each_child_of_node(cdev_node, dp) {
+ if (of_node_is_type(dp, type))
break;
-
- dp = of_get_next_child(cdev_node, dp);
}
} else {
dp = to_vio_dev(parent)->dp;
}
vdev->dp = dp;
- printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev));
+ /*
+ * node_name is NULL for the parent/channel-devices node and
+ * the parent doesn't require the MD node info.
+ */
+ if (node_name != NULL) {
+ (void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
+ node_name);
+
+ err = mdesc_get_node_info(hp, mp, node_name,
+ &vdev->md_node_info);
+ if (err) {
+ pr_err("VIO: Could not get MD node info %s, err=%d\n",
+ dev_name(&vdev->dev), err);
+ kfree(vdev);
+ return NULL;
+ }
+ }
+
+ pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
+ dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);
err = device_register(&vdev->dev);
if (err) {
printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
dev_name(&vdev->dev), err);
- kfree(vdev);
+ put_device(&vdev->dev);
return NULL;
}
if (vdev->dp)
@@ -317,32 +408,50 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
return vdev;
}
-static void vio_add(struct mdesc_handle *hp, u64 node)
+static void vio_add(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
-static int vio_md_node_match(struct device *dev, void *arg)
+struct vio_remove_node_data {
+ struct mdesc_handle *hp;
+ u64 node;
+};
+
+static int vio_md_node_match(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
+ const struct vio_remove_node_data *node_data;
+ u64 node;
- if (vdev->mp == (u64) arg)
- return 1;
+ node_data = (const struct vio_remove_node_data *)arg;
- return 0;
+ node = vio_vdev_node(node_data->hp, vdev);
+
+ if (node == node_data->node)
+ return 1;
+ else
+ return 0;
}
-static void vio_remove(struct mdesc_handle *hp, u64 node)
+static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
{
+ struct vio_remove_node_data node_data;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ node_data.hp = hp;
+ node_data.node = node;
+
+ dev = device_find_child(&root_vdev->dev, (void *)&node_data,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ pr_err("VIO: %s node not found in MDESC\n", node_name);
}
}
@@ -357,7 +466,8 @@ static struct mdesc_notifier_client vio_device_notifier = {
* under "openboot" that we should not mess with as aparently that is
* reserved exclusively for OBP use.
*/
-static void vio_add_ds(struct mdesc_handle *hp, u64 node)
+static void vio_add_ds(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
int found;
u64 a;
@@ -374,7 +484,7 @@ static void vio_add_ds(struct mdesc_handle *hp, u64 node)
}
if (found)
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
static struct mdesc_notifier_client vio_ds_notifier = {
@@ -441,7 +551,7 @@ static int __init vio_init(void)
cdev_cfg_handle = *cfg_handle;
- root_vdev = vio_create_one(hp, root, NULL);
+ root_vdev = vio_create_one(hp, root, NULL, NULL);
err = -ENODEV;
if (!root_vdev) {
printk(KERN_ERR "VIO: Could not create root device.\n");
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index f8e7dd53e1c7..8fb2e7ca5015 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
@@ -8,6 +9,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <asm/ldc.h>
@@ -178,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
- dr->ncookies)];
+ VIO_MAX_RING_COOKIES)];
} u;
+ size_t bytes = sizeof(struct vio_dring_register) +
+ (sizeof(struct ldc_trans_cookie) *
+ dr->ncookies);
int i;
- memset(&u, 0, sizeof(u));
+ if (WARN_ON(bytes > sizeof(u)))
+ return -EINVAL;
+
+ memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
@@ -204,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
- return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+ return send_ctrl(vio, &u.pkt.tag, bytes);
}
static int send_rdx(struct vio_driver_state *vio)
@@ -222,6 +230,9 @@ static int send_rdx(struct vio_driver_state *vio)
static int send_attr(struct vio_driver_state *vio)
{
+ if (!vio->ops)
+ return -EINVAL;
+
return vio->ops->send_attr(vio);
}
@@ -282,6 +293,7 @@ static int process_ver_info(struct vio_driver_state *vio,
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
+ pkt->dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
@@ -373,6 +385,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
+ if (!vio->ops)
+ return 0;
+
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
@@ -387,6 +402,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
+
return 0;
}
@@ -412,7 +428,7 @@ static int process_dreg_info(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
- int i, len;
+ int i;
viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
@@ -426,6 +442,13 @@ static int process_dreg_info(struct vio_driver_state *vio,
if (vio->dr_state & VIO_DR_STATE_RXREG)
goto send_nack;
+ /* v1.6 and higher, ACK with desired, supported mode, or NACK */
+ if (vio_version_after_eq(vio, 1, 6)) {
+ if (!(pkt->options & VIO_TX_DRING))
+ goto send_nack;
+ pkt->options = VIO_TX_DRING;
+ }
+
BUG_ON(vio->desc_buf);
vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
@@ -453,12 +476,13 @@ static int process_dreg_info(struct vio_driver_state *vio,
pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt->dring_ident = ++dr->ident;
- viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n",
- (unsigned long long) pkt->dring_ident);
+ viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
+ "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
+ (unsigned long long) pkt->dring_ident,
+ pkt->num_descr, pkt->descr_size, pkt->options,
+ pkt->num_cookies);
- len = (sizeof(*pkt) +
- (dr->ncookies * sizeof(struct ldc_trans_cookie)));
- if (send_ctrl(vio, &pkt->tag, len) < 0)
+ if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
goto send_nack;
vio->dr_state |= VIO_DR_STATE_RXREG;
@@ -636,10 +660,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
err = process_unknown(vio, pkt);
break;
}
+
if (!err &&
vio->hs_state != prev_state &&
- (vio->hs_state & VIO_HS_COMPLETE))
- vio->ops->handshake_complete(vio);
+ (vio->hs_state & VIO_HS_COMPLETE)) {
+ if (vio->ops)
+ vio->ops->handshake_complete(vio);
+ }
return err;
}
@@ -714,7 +741,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
cfg.tx_irq = vio->vdev->tx_irq;
cfg.rx_irq = vio->vdev->rx_irq;
- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
+ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
if (IS_ERR(lp))
return PTR_ERR(lp);
@@ -746,7 +773,7 @@ void vio_port_up(struct vio_driver_state *vio)
err = 0;
if (state == LDC_STATE_INIT) {
- err = ldc_bind(vio->lp, vio->name);
+ err = ldc_bind(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu bind failed, "
"err=%d\n",
@@ -754,7 +781,11 @@ void vio_port_up(struct vio_driver_state *vio)
}
if (!err) {
- err = ldc_connect(vio->lp);
+ if (ldc_mode(vio->lp) == LDC_MODE_RAW)
+ ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
+ else
+ err = ldc_connect(vio->lp);
+
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
@@ -771,9 +802,9 @@ void vio_port_up(struct vio_driver_state *vio)
}
EXPORT_SYMBOL(vio_port_up);
-static void vio_port_timer(unsigned long _arg)
+static void vio_port_timer(struct timer_list *t)
{
- struct vio_driver_state *vio = (struct vio_driver_state *) _arg;
+ struct vio_driver_state *vio = timer_container_of(vio, t, timer);
vio_port_up(vio);
}
@@ -788,16 +819,21 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
case VDEV_NETWORK_SWITCH:
case VDEV_DISK:
case VDEV_DISK_SERVER:
+ case VDEV_CONSOLE_CON:
break;
default:
return -EINVAL;
}
- if (!ops->send_attr ||
- !ops->handle_attr ||
- !ops->handshake_complete)
- return -EINVAL;
+ if (dev_class == VDEV_NETWORK ||
+ dev_class == VDEV_NETWORK_SWITCH ||
+ dev_class == VDEV_DISK ||
+ dev_class == VDEV_DISK_SERVER) {
+ if (!ops || !ops->send_attr || !ops->handle_attr ||
+ !ops->handshake_complete)
+ return -EINVAL;
+ }
if (!ver_table || ver_table_size < 0)
return -EINVAL;
@@ -817,7 +853,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
vio->ops = ops;
- setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);
+ timer_setup(&vio->timer, vio_port_timer, 0);
return 0;
}
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index c096c624ac4d..64ed80ed6cc2 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* visemul.c: Emulation of VIS instructions.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
@@ -10,7 +11,7 @@
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/fpumacro.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
/* OPF field of various VIS instructions. */
@@ -30,7 +31,7 @@
/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF 0x04b
-/* 000110001 - 8-by-16-bit partitoned product */
+/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF 0x031
/* 000110011 - 8-by-16-bit upper alpha partitioned product */
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0bacceb19150..f1b86eb30340 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ld script for sparc32/sparc64 kernel */
#include <asm-generic/vmlinux.lds.h>
@@ -33,20 +34,31 @@ ENTRY(_start)
jiffies = jiffies_64;
#endif
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
SECTIONS
{
- /* swapper_low_pmd_dir is sparc64 only */
- swapper_low_pmd_dir = 0x0000000000402000;
+#ifdef CONFIG_SPARC64
+ swapper_pg_dir = 0x0000000000402000;
+#endif
. = INITIAL_ADDRESS;
.text TEXTSTART :
{
_text = .;
HEAD_TEXT
+ ALIGN_FUNCTION();
+#ifdef CONFIG_SPARC64
+ /* Match text section symbols in head_64.S first */
+ *head_64.o(.text)
+#endif
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
*(.gnu.warning)
} = 0
_etext = .;
@@ -59,7 +71,7 @@ SECTIONS
.data1 : {
*(.data1)
}
- RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE)
+ RW_DATA(SMP_CACHE_BYTES, 0, THREAD_SIZE)
/* End of data section */
_edata = .;
@@ -70,7 +82,6 @@ SECTIONS
__stop___fixup = .;
}
EXCEPTION_TABLE(16)
- NOTES
. = ALIGN(PAGE_SIZE);
__init_begin = ALIGN(PAGE_SIZE);
@@ -137,15 +148,50 @@ SECTIONS
*(.pause_3insn_patch)
__pause_3insn_patch_end = .;
}
+ .sun_m7_1insn_patch : {
+ __sun_m7_1insn_patch = .;
+ *(.sun_m7_1insn_patch)
+ __sun_m7_1insn_patch_end = .;
+ }
+ .sun_m7_2insn_patch : {
+ __sun_m7_2insn_patch = .;
+ *(.sun_m7_2insn_patch)
+ __sun_m7_2insn_patch_end = .;
+ }
+ .get_tick_patch : {
+ __get_tick_patch = .;
+ *(.get_tick_patch)
+ __get_tick_patch_end = .;
+ }
+ .pud_huge_patch : {
+ __pud_huge_patch = .;
+ *(.pud_huge_patch)
+ __pud_huge_patch_end = .;
+ }
+ .fast_win_ctrl_1insn_patch : {
+ __fast_win_ctrl_1insn_patch = .;
+ *(.fast_win_ctrl_1insn_patch)
+ __fast_win_ctrl_1insn_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
+ .exit.text : {
+ EXIT_TEXT
+ }
+
+ .exit.data : {
+ EXIT_DATA
+ }
+
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, 0, 0)
_end = . ;
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index 3107381e576d..8f20862ccc83 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* windows.c: Routines to deal with register window management
* at the C-code level.
*
@@ -10,7 +11,10 @@
#include <linux/mm.h>
#include <linux/smp.h>
-#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/uaccess.h>
+
+#include "kernel.h"
/* Do save's until all user register windows are out of the cpu. */
void flush_user_windows(void)
@@ -117,8 +121,10 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
if ((sp & 7) ||
copy_to_user((char __user *) sp, &tp->reg_window[window],
- sizeof(struct reg_window32)))
- do_exit(SIGILL);
+ sizeof(struct reg_window32))) {
+ force_exit_sig(SIGILL);
+ return;
+ }
}
tp->w_saved = 0;
}
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 1e67ce958369..448accee090f 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* winfixup.S: Handle cases where user stack pointer is found to be bogus.
*
* Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net)
@@ -32,8 +33,7 @@ fill_fixup:
rd %pc, %g7
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
/* Be very careful about usage of the trap globals here.
* You cannot touch %g5 as that has the fault information.
@@ -153,6 +153,8 @@ fill_fixup_dax:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
+ nop
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 28a7bc69f82b..96a3a112423a 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* wof.S: Sparc window overflow handler.
*
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 2c21cc59683e..1a4ca490e9c2 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* wuf.S: Window underflow trap handler for the Sparc.
*