path: root/arch/sparc/kernel
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/Makefile | 6
-rw-r--r--  arch/sparc/kernel/apc.c | 9
-rw-r--r--  arch/sparc/kernel/auxio_64.c | 9
-rw-r--r--  arch/sparc/kernel/central.c | 18
-rw-r--r--  arch/sparc/kernel/chmc.c | 19
-rw-r--r--  arch/sparc/kernel/cpu.c | 141
-rw-r--r--  arch/sparc/kernel/cpumap.c | 4
-rw-r--r--  arch/sparc/kernel/devices.c | 4
-rw-r--r--  arch/sparc/kernel/ds.c | 16
-rw-r--r--  arch/sparc/kernel/entry.S | 47
-rw-r--r--  arch/sparc/kernel/entry.h | 4
-rw-r--r--  arch/sparc/kernel/head_32.S | 51
-rw-r--r--  arch/sparc/kernel/head_64.S | 2
-rw-r--r--  arch/sparc/kernel/init_task.c | 2
-rw-r--r--  arch/sparc/kernel/iommu.c | 3
-rw-r--r--  arch/sparc/kernel/ioport.c | 116
-rw-r--r--  arch/sparc/kernel/irq.h | 93
-rw-r--r--  arch/sparc/kernel/irq_32.c | 593
-rw-r--r--  arch/sparc/kernel/irq_64.c | 389
-rw-r--r--  arch/sparc/kernel/kernel.h | 54
-rw-r--r--  arch/sparc/kernel/ldc.c | 28
-rw-r--r--  arch/sparc/kernel/leon_kernel.c | 374
-rw-r--r--  arch/sparc/kernel/leon_pmc.c | 82
-rw-r--r--  arch/sparc/kernel/leon_smp.c | 184
-rw-r--r--  arch/sparc/kernel/mdesc.c | 4
-rw-r--r--  arch/sparc/kernel/of_device_32.c | 59
-rw-r--r--  arch/sparc/kernel/of_device_64.c | 5
-rw-r--r--  arch/sparc/kernel/of_device_common.c | 27
-rw-r--r--  arch/sparc/kernel/pci.c | 11
-rw-r--r--  arch/sparc/kernel/pci_common.c | 11
-rw-r--r--  arch/sparc/kernel/pci_fire.c | 19
-rw-r--r--  arch/sparc/kernel/pci_impl.h | 4
-rw-r--r--  arch/sparc/kernel/pci_msi.c | 50
-rw-r--r--  arch/sparc/kernel/pci_psycho.c | 9
-rw-r--r--  arch/sparc/kernel/pci_sabre.c | 14
-rw-r--r--  arch/sparc/kernel/pci_schizo.c | 21
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 18
-rw-r--r--  arch/sparc/kernel/pcic.c | 87
-rw-r--r--  arch/sparc/kernel/pcr.c | 2
-rw-r--r--  arch/sparc/kernel/perf_event.c | 3
-rw-r--r--  arch/sparc/kernel/pmc.c | 9
-rw-r--r--  arch/sparc/kernel/power.c | 8
-rw-r--r--  arch/sparc/kernel/process_32.c | 12
-rw-r--r--  arch/sparc/kernel/prom_32.c | 1
-rw-r--r--  arch/sparc/kernel/prom_irqtrans.c | 16
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 3
-rw-r--r--  arch/sparc/kernel/setup_32.c | 106
-rw-r--r--  arch/sparc/kernel/setup_64.c | 78
-rw-r--r--  arch/sparc/kernel/smp_32.c | 115
-rw-r--r--  arch/sparc/kernel/smp_64.c | 70
-rw-r--r--  arch/sparc/kernel/sun4c_irq.c | 225
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c | 632
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 267
-rw-r--r--  arch/sparc/kernel/sun4m_irq.c | 359
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 140
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 21
-rw-r--r--  arch/sparc/kernel/sysfs.c | 3
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 6
-rw-r--r--  arch/sparc/kernel/tick14.c | 39
-rw-r--r--  arch/sparc/kernel/time_32.c | 33
-rw-r--r--  arch/sparc/kernel/time_64.c | 28
-rw-r--r--  arch/sparc/kernel/traps_64.c | 3
-rw-r--r--  arch/sparc/kernel/una_asm_64.S | 2
-rw-r--r--  arch/sparc/kernel/us2e_cpufreq.c | 4
-rw-r--r--  arch/sparc/kernel/us3_cpufreq.c | 4
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 2
67 files changed, 2383 insertions, 2399 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 599398fbbc7c..9cff2709a96d 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC32) += devices.o
obj-$(CONFIG_SPARC32) += tadpole.o
-obj-$(CONFIG_SPARC32) += tick14.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
@@ -54,6 +53,7 @@ obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
+obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
@@ -71,10 +71,6 @@ obj-$(CONFIG_SPARC64) += pcr.o
obj-$(CONFIG_SPARC64) += nmi.o
obj-$(CONFIG_SPARC64_SMP) += cpumap.o
-# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
-obj-$(CONFIG_SPARC32) += devres.o
-devres-y := ../../../kernel/irq/devres.o
-
obj-y += dma.o
obj-$(CONFIG_SPARC32_PCI) += pcic.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 52de4a9424e8..1e34f29e58bb 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -137,8 +137,7 @@ static const struct file_operations apc_fops = {
static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
-static int __devinit apc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit apc_probe(struct platform_device *op)
{
int err;
@@ -166,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op,
return 0;
}
-static struct of_device_id __initdata apc_match[] = {
+static struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
@@ -174,7 +173,7 @@ static struct of_device_id __initdata apc_match[] = {
};
MODULE_DEVICE_TABLE(of, apc_match);
-static struct of_platform_driver apc_driver = {
+static struct platform_driver apc_driver = {
.driver = {
.name = "apc",
.owner = THIS_MODULE,
@@ -185,7 +184,7 @@ static struct of_platform_driver apc_driver = {
static int __init apc_init(void)
{
- return of_register_platform_driver(&apc_driver);
+ return platform_driver_register(&apc_driver);
}
/* This driver is not critical to the boot process
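The same mechanical conversion repeats across apc.c, auxio_64.c, central.c and chmc.c below: the probe callback drops the of_device_id argument, the driver struct becomes a plain platform_driver, and registration goes through platform_driver_register(). A minimal sketch of the resulting shape, using a hypothetical "foo" driver (names are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int __devinit foo_probe(struct platform_device *op)
{
	/* match data, if needed, is now reached via op->dev.of_node */
	return 0;
}

static const struct of_device_id foo_match[] = {
	{ .name = "foo", },
	{},
};
MODULE_DEVICE_TABLE(of, foo_match);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
		.of_match_table = foo_match,
	},
};

static int __init foo_init(void)
{
	/* was of_register_platform_driver(&foo_driver) */
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);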
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 3efd3c5af6a9..773091ac71a3 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -93,7 +93,7 @@ void auxio_set_lte(int on)
}
EXPORT_SYMBOL(auxio_set_lte);
-static struct of_device_id __initdata auxio_match[] = {
+static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
@@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = {
MODULE_DEVICE_TABLE(of, auxio_match);
-static int __devinit auxio_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
@@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev,
return 0;
}
-static struct of_platform_driver auxio_driver = {
+static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
@@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = {
static int __init auxio_init(void)
{
- return of_register_platform_driver(&auxio_driver);
+ return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index cfa2624c5332..7eef3f741963 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -59,8 +59,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p)
}
}
-static int __devinit clock_board_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit clock_board_probe(struct platform_device *op)
{
struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -141,14 +140,14 @@ out_free:
goto out;
}
-static struct of_device_id __initdata clock_board_match[] = {
+static const struct of_device_id clock_board_match[] = {
{
.name = "clock-board",
},
{},
};
-static struct of_platform_driver clock_board_driver = {
+static struct platform_driver clock_board_driver = {
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
@@ -157,8 +156,7 @@ static struct of_platform_driver clock_board_driver = {
},
};
-static int __devinit fhc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fhc_probe(struct platform_device *op)
{
struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -247,14 +245,14 @@ out_free:
goto out;
}
-static struct of_device_id __initdata fhc_match[] = {
+static const struct of_device_id fhc_match[] = {
{
.name = "fhc",
},
{},
};
-static struct of_platform_driver fhc_driver = {
+static struct platform_driver fhc_driver = {
.probe = fhc_probe,
.driver = {
.name = "fhc",
@@ -265,8 +263,8 @@ static struct of_platform_driver fhc_driver = {
static int __init sunfire_init(void)
{
- (void) of_register_platform_driver(&fhc_driver);
- (void) of_register_platform_driver(&clock_board_driver);
+ (void) platform_driver_register(&fhc_driver);
+ (void) platform_driver_register(&clock_board_driver);
return 0;
}
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 08c466ebb32b..668c7be5d365 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
}
}
-static int __devinit jbusmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit jbusmc_probe(struct platform_device *op)
{
const struct linux_prom64_registers *mem_regs;
struct device_node *mem_node;
@@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p)
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
-static int __devinit chmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit chmc_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
unsigned long ver;
@@ -765,13 +763,12 @@ out_free:
goto out;
}
-static int __devinit us3mc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit us3mc_probe(struct platform_device *op)
{
if (mc_type == MC_TYPE_SAFARI)
- return chmc_probe(op, match);
+ return chmc_probe(op);
else if (mc_type == MC_TYPE_JBUS)
- return jbusmc_probe(op, match);
+ return jbusmc_probe(op);
return -ENODEV;
}
@@ -810,7 +807,7 @@ static const struct of_device_id us3mc_match[] = {
};
MODULE_DEVICE_TABLE(of, us3mc_match);
-static struct of_platform_driver us3mc_driver = {
+static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
.owner = THIS_MODULE,
@@ -848,7 +845,7 @@ static int __init us3mc_init(void)
ret = register_dimm_printer(us3mc_dimm_printer);
if (!ret) {
- ret = of_register_platform_driver(&us3mc_driver);
+ ret = platform_driver_register(&us3mc_driver);
if (ret)
unregister_dimm_printer(us3mc_dimm_printer);
}
@@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void)
{
if (us3mc_platform()) {
unregister_dimm_printer(us3mc_dimm_printer);
- of_unregister_platform_driver(&us3mc_driver);
+ platform_driver_unregister(&us3mc_driver);
}
}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 0dc714fa23d8..138dbbc8dc84 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -4,6 +4,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -11,7 +12,9 @@
#include <linux/threads.h>
#include <asm/spitfire.h>
+#include <asm/pgtable.h>
#include <asm/oplib.h>
+#include <asm/setup.h>
#include <asm/page.h>
#include <asm/head.h>
#include <asm/psr.h>
@@ -23,6 +26,9 @@
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
EXPORT_PER_CPU_SYMBOL(__cpu_data);
+int ncpus_probed;
+unsigned int fsr_storage;
+
struct cpu_info {
int psr_vers;
const char *name;
@@ -247,13 +253,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
* machine type value into consideration too. I will fix this.
*/
-const char *sparc_cpu_type;
-const char *sparc_fpu_type;
+static const char *sparc_cpu_type;
+static const char *sparc_fpu_type;
const char *sparc_pmu_type;
-unsigned int fsr_storage;
-static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
+static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
{
const struct manufacturer_info *manuf;
int i;
@@ -313,7 +318,123 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
}
#ifdef CONFIG_SPARC32
-void __cpuinit cpu_probe(void)
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+ seq_printf(m,
+ "cpu\t\t: %s\n"
+ "fpu\t\t: %s\n"
+ "promlib\t\t: Version %d Revision %d\n"
+ "prom\t\t: %d.%d\n"
+ "type\t\t: %s\n"
+ "ncpus probed\t: %d\n"
+ "ncpus active\t: %d\n"
+#ifndef CONFIG_SMP
+ "CPU0Bogo\t: %lu.%02lu\n"
+ "CPU0ClkTck\t: %ld\n"
+#endif
+ ,
+ sparc_cpu_type,
+ sparc_fpu_type ,
+ romvec->pv_romvers,
+ prom_rev,
+ romvec->pv_printrev >> 16,
+ romvec->pv_printrev & 0xffff,
+ &cputypval[0],
+ ncpus_probed,
+ num_online_cpus()
+#ifndef CONFIG_SMP
+ , cpu_data(0).udelay_val/(500000/HZ),
+ (cpu_data(0).udelay_val/(5000/HZ)) % 100,
+ cpu_data(0).clock_tick
+#endif
+ );
+
+#ifdef CONFIG_SMP
+ smp_bogo(m);
+#endif
+ mmu_info(m);
+#ifdef CONFIG_SMP
+ smp_info(m);
+#endif
+ return 0;
+}
+#endif /* CONFIG_SPARC32 */
+
+#ifdef CONFIG_SPARC64
+unsigned int dcache_parity_tl1_occurred;
+unsigned int icache_parity_tl1_occurred;
+
+
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+ seq_printf(m,
+ "cpu\t\t: %s\n"
+ "fpu\t\t: %s\n"
+ "pmu\t\t: %s\n"
+ "prom\t\t: %s\n"
+ "type\t\t: %s\n"
+ "ncpus probed\t: %d\n"
+ "ncpus active\t: %d\n"
+ "D$ parity tl1\t: %u\n"
+ "I$ parity tl1\t: %u\n"
+#ifndef CONFIG_SMP
+ "Cpu0ClkTck\t: %016lx\n"
+#endif
+ ,
+ sparc_cpu_type,
+ sparc_fpu_type,
+ sparc_pmu_type,
+ prom_version,
+ ((tlb_type == hypervisor) ?
+ "sun4v" :
+ "sun4u"),
+ ncpus_probed,
+ num_online_cpus(),
+ dcache_parity_tl1_occurred,
+ icache_parity_tl1_occurred
+#ifndef CONFIG_SMP
+ , cpu_data(0).clock_tick
+#endif
+ );
+#ifdef CONFIG_SMP
+ smp_bogo(m);
+#endif
+ mmu_info(m);
+#ifdef CONFIG_SMP
+ smp_info(m);
+#endif
+ return 0;
+}
+#endif /* CONFIG_SPARC64 */
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /* The pointer we are returning is arbitrary,
+ * it just has to be non-NULL and not IS_ERR
+ * in the success case.
+ */
+ return *pos == 0 ? &c_start : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start =c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
+#ifdef CONFIG_SPARC32
+static int __init cpu_type_probe(void)
{
int psr_impl, psr_vers, fpu_vers;
int psr;
@@ -324,7 +445,7 @@ void __cpuinit cpu_probe(void)
psr = get_psr();
put_psr(psr | PSR_EF);
#ifdef CONFIG_SPARC_LEON
- fpu_vers = 7;
+ fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
#else
fpu_vers = ((get_fsr() >> 17) & 0x7);
#endif
@@ -332,8 +453,12 @@ void __cpuinit cpu_probe(void)
put_psr(psr);
set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
+
+ return 0;
}
-#else
+#endif /* CONFIG_SPARC32 */
+
+#ifdef CONFIG_SPARC64
static void __init sun4v_cpu_probe(void)
{
switch (sun4v_chip_type) {
@@ -374,6 +499,6 @@ static int __init cpu_type_probe(void)
}
return 0;
}
+#endif /* CONFIG_SPARC64 */
early_initcall(cpu_type_probe);
-#endif
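The cpuinfo_op added above uses the usual one-shot seq_file idiom: c_start() hands back an arbitrary non-NULL cookie only while *pos == 0, so show_cpuinfo() runs exactly once per read. For context, this is roughly how such an op table is consumed by procfs (generic seq_file wiring in the style of fs/proc/cpuinfo.c, shown for illustration only):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int cpuinfo_open(struct inode *inode, struct file *file)
{
	/* cpuinfo_op is the arch-provided seq_operations table */
	return seq_open(file, &cpuinfo_op);
}

static const struct file_operations proc_cpuinfo_operations = {
	.open		= cpuinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};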
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 8de64c8126bc..d91fd782743a 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
new_tree->total_nodes = n;
memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
- prev_cpu = cpu = first_cpu(cpu_online_map);
+ prev_cpu = cpu = cpumask_first(cpu_online_mask);
/* Initialize all levels in the tree with the first CPU */
for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
@@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index)
}
/* Impossible, since num_online_cpus() <= num_possible_cpus() */
- return first_cpu(cpu_online_map);
+ return cpumask_first(cpu_online_mask);
}
static int _map_to_cpu(unsigned int index)
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index d2eddd6647cd..113c052c3043 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -20,7 +20,6 @@
#include <asm/system.h>
#include <asm/cpudata.h>
-extern void cpu_probe(void);
extern void clock_stop_probe(void); /* tadpole.c */
extern void sun4c_probe_memerr_reg(void);
@@ -115,7 +114,7 @@ int cpu_get_hwmid(phandle prom_node)
void __init device_scan(void)
{
- prom_printf("Booting Linux...\n");
+ printk(KERN_NOTICE "Booting Linux...\n");
#ifndef CONFIG_SMP
{
@@ -133,7 +132,6 @@ void __init device_scan(void)
}
#endif /* !CONFIG_SMP */
- cpu_probe();
{
extern void auxio_probe(void);
extern void auxio_power_probe(void);
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 4a700f4b79ce..dd1342c0a3be 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
tag->num_records = ncpus;
i = 0;
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
ent[i].cpu = cpu;
ent[i].result = DR_CPU_RES_OK;
ent[i].stat = default_stat;
@@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
int resp_len, ncpus, cpu;
unsigned long flags;
- ncpus = cpus_weight(*mask);
+ ncpus = cpumask_weight(mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
@@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
mdesc_populate_present_mask(mask);
mdesc_fill_in_cpu_data(mask);
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
int err;
printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
@@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
int resp_len, ncpus, cpu;
unsigned long flags;
- ncpus = cpus_weight(*mask);
+ ncpus = cpumask_weight(mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
@@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
resp_len, ncpus, mask,
DR_CPU_STAT_UNCONFIGURED);
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
int err;
printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
@@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,
purge_dups(cpu_list, tag->num_records);
- cpus_clear(mask);
+ cpumask_clear(&mask);
for (i = 0; i < tag->num_records; i++) {
if (cpu_list[i] == CPU_SENTINEL)
continue;
if (cpu_list[i] < nr_cpu_ids)
- cpu_set(cpu_list[i], mask);
+ cpumask_set_cpu(cpu_list[i], &mask);
}
if (tag->type == DR_CPU_CONFIGURE)
@@ -1218,7 +1218,7 @@ static int ds_remove(struct vio_dev *vdev)
return 0;
}
-static struct vio_device_id __initdata ds_match[] = {
+static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
},
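The ds.c hunks above (and the cpumap.c ones before them) follow the kernel-wide switch from the old value-based cpumask helpers to the pointer-based cpumask_* API. A small illustrative sketch of the new calls, with the old equivalents noted in comments (hypothetical helper, not from this patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void mark_and_report(cpumask_t *mask, int extra_cpu)
{
	int cpu, ncpus;

	ncpus = cpumask_weight(mask);		/* was cpus_weight(*mask) */
	cpumask_set_cpu(extra_cpu, mask);	/* was cpu_set(extra_cpu, *mask) */

	for_each_cpu(cpu, mask)			/* was for_each_cpu_mask(cpu, *mask) */
		pr_info("cpu %d (of %d originally set)\n", cpu, ncpus);
}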
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 1504df8ddf70..8341963f4c84 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -269,19 +269,22 @@ smp4m_ticker:
/* Here is where we check for possible SMP IPI passed to us
* on some level other than 15 which is the NMI and only used
* for cross calls. That has a separate entry point below.
+ *
+ * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
*/
maybe_smp4m_msg:
GET_PROCESSOR4M_ID(o3)
sethi %hi(sun4m_irq_percpu), %l5
sll %o3, 2, %o3
or %l5, %lo(sun4m_irq_percpu), %o5
- sethi %hi(0x40000000), %o2
+ sethi %hi(0x70000000), %o2 ! Check all soft-IRQs
ld [%o5 + %o3], %o1
ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
andcc %o3, %o2, %g0
be,a smp4m_ticker
cmp %l7, 14
- st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x40000000
+ /* Soft-IRQ IPI */
+ st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x70000000
WRITE_PAUSE
ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
WRITE_PAUSE
@@ -290,9 +293,27 @@ maybe_smp4m_msg:
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
- call smp_reschedule_irq
+ sll %o2, 28, %o2 ! shift for simpler checks below
+maybe_smp4m_msg_check_single:
+ andcc %o2, 0x1, %g0
+ beq,a maybe_smp4m_msg_check_mask
+ andcc %o2, 0x2, %g0
+ call smp_call_function_single_interrupt
nop
-
+ andcc %o2, 0x2, %g0
+maybe_smp4m_msg_check_mask:
+ beq,a maybe_smp4m_msg_check_resched
+ andcc %o2, 0x4, %g0
+ call smp_call_function_interrupt
+ nop
+ andcc %o2, 0x4, %g0
+maybe_smp4m_msg_check_resched:
+ /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
+ beq,a maybe_smp4m_msg_out
+ nop
+ call smp_resched_interrupt
+ nop
+maybe_smp4m_msg_out:
RESTORE_ALL
.align 4
@@ -401,18 +422,18 @@ linux_trap_ipi15_sun4d:
1: b,a 1b
#ifdef CONFIG_SPARC_LEON
-
- .globl smpleon_ticker
- /* SMP per-cpu ticker interrupts are handled specially. */
-smpleon_ticker:
+ .globl smpleon_ipi
+ .extern leon_ipi_interrupt
+ /* SMP per-cpu IPI interrupts are handled specially. */
+smpleon_ipi:
SAVE_ALL
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
- call leon_percpu_timer_interrupt
- add %sp, STACKFRAME_SZ, %o0
+ call leonsmp_ipi_interrupt
+ add %sp, STACKFRAME_SZ, %o1 ! pt_regs
wr %l0, PSR_ET, %psr
WRITE_PAUSE
RESTORE_ALL
@@ -801,7 +822,7 @@ vac_linesize_patch_32: subcc %l7, 32, %l7
.globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
/*
- * Ugly, but we cant use hardware flushing on the sun4 and we'd require
+ * Ugly, but we can't use hardware flushing on the sun4 and we'd require
* two instructions (Anton)
*/
vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
@@ -851,7 +872,7 @@ sun4c_fault:
sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
/* If the kernel references a bum kernel pointer, or a pte which
- * points to a non existant page in ram, we will run this code
+ * points to a non existent page in ram, we will run this code
* _forever_ and lock up the machine!!!!! So we must check for
* this condition, the AC_SYNC_ERR bits are what we must examine.
* Also a parity error would make this happen as well. So we just
@@ -1283,7 +1304,7 @@ linux_syscall_trace:
.globl ret_from_fork
ret_from_fork:
call schedule_tail
- mov %g3, %o0
+ ld [%g3 + TI_TASK], %o0
b ret_sys_call
ld [%sp + STACKFRAME_SZ + PT_I0], %o0
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index c011b932bb17..d1f1361c4167 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
- /* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
+ /* Interrupt number assigned to this INO. */
+/*0x08*/unsigned int __irq;
/*0x0c*/unsigned int __pad;
};
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 59423491cef8..587785759838 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -810,31 +810,25 @@ found_version:
got_prop:
#ifdef CONFIG_SPARC_LEON
/* no cpu-type check is needed, it is a SPARC-LEON */
-#ifdef CONFIG_SMP
- ba leon_smp_init
- nop
- .global leon_smp_init
-leon_smp_init:
- sethi %hi(boot_cpu_id), %g1 ! master always 0
- stb %g0, [%g1 + %lo(boot_cpu_id)]
- sethi %hi(boot_cpu_id4), %g1 ! master always 0
- stb %g0, [%g1 + %lo(boot_cpu_id4)]
+ sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
- rd %asr17,%g1
- srl %g1,28,%g1
+#ifdef CONFIG_SMP
+ ldub [%g2 + %lo(boot_cpu_id)], %g1
+ cmp %g1, 0xff ! unset means first CPU
+ bne leon_smp_cpu_startup ! continue only with master
+ nop
+#endif
+ /* Get CPU-ID from most significant 4-bit of ASR17 */
+ rd %asr17, %g1
+ srl %g1, 28, %g1
- cmp %g0,%g1
- beq sun4c_continue_boot !continue with master
- nop
+ /* Update boot_cpu_id only on boot cpu */
+ stub %g1, [%g2 + %lo(boot_cpu_id)]
- ba leon_smp_cpu_startup
- nop
-#else
ba sun4c_continue_boot
nop
#endif
-#endif
set cputypval, %o2
ldub [%o2 + 0x4], %l1
@@ -893,9 +887,6 @@ sun4d_init:
sta %g4, [%g0] ASI_M_VIKING_TMP1
sethi %hi(boot_cpu_id), %g5
stb %g4, [%g5 + %lo(boot_cpu_id)]
- sll %g4, 2, %g4
- sethi %hi(boot_cpu_id4), %g5
- stb %g4, [%g5 + %lo(boot_cpu_id4)]
#endif
/* Fall through to sun4m_init */
@@ -1024,14 +1015,28 @@ sun4c_continue_boot:
bl 1b
add %o0, 0x1, %o0
+ /* If boot_cpu_id has not been setup by machine specific
+ * init-code above we default it to zero.
+ */
+ sethi %hi(boot_cpu_id), %g2
+ ldub [%g2 + %lo(boot_cpu_id)], %g3
+ cmp %g3, 0xff
+ bne 1f
+ nop
+ mov %g0, %g3
+ stub %g3, [%g2 + %lo(boot_cpu_id)]
+
+1: /* boot_cpu_id set. calculate boot_cpu_id4 = boot_cpu_id*4 */
+ sll %g3, 2, %g3
+ sethi %hi(boot_cpu_id4), %g2
+ stub %g3, [%g2 + %lo(boot_cpu_id4)]
+
/* Initialize the uwinmask value for init task just in case.
* But first make current_set[boot_cpu_id] point to something useful.
*/
set init_thread_union, %g6
set current_set, %g2
#ifdef CONFIG_SMP
- sethi %hi(boot_cpu_id4), %g3
- ldub [%g3 + %lo(boot_cpu_id4)], %g3
st %g6, [%g2]
add %g2, %g3, %g2
#endif
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index f8f21050448b..aa594c792d19 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -85,7 +85,7 @@ sparc_ramdisk_image64:
sparc64_boot:
mov %o4, %l7
- /* We need to remap the kernel. Use position independant
+ /* We need to remap the kernel. Use position independent
* code to remap us to KERNBASE.
*
* SILO can invoke us with 32-bit address masking enabled,
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index 5fe3d65581f7..35f141a9f506 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -15,7 +15,7 @@ EXPORT_SYMBOL(init_task);
/* .text section in head.S is aligned at 8k boundary and this gets linked
* right after that so that the init_thread_union is aligned properly as well.
- * If this is not aligned on a 8k boundry, then you should change code
+ * If this is not aligned on a 8k boundary, then you should change code
* in etrap.S which assumes it.
*/
union thread_union init_thread_union __init_task_data =
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 72509d0e34be..6f01e8c83197 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -333,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
- iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72a..1c9c80a1a86a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,19 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
+/* This function must make sure that caches and memory are coherent after DMA
+ * On LEON systems without cache snooping it flushes the entire D-CACHE.
+ */
+#ifndef CONFIG_SPARC_LEON
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+}
#else
-#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+}
#endif
static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +263,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp)
{
struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+ unsigned long len_total = PAGE_ALIGN(len);
unsigned long va;
struct resource *res;
int order;
@@ -280,7 +289,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
+
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
@@ -297,9 +306,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
err_noiommu:
release_resource(res);
err_nova:
- free_pages(va, order);
-err_nomem:
kfree(res);
+err_nomem:
+ free_pages(va, order);
err_nopages:
return NULL;
}
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
@@ -331,7 +340,6 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
release_resource(res);
kfree(res);
- /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
pgv = virt_to_page(p);
mmu_unmap_dma_area(dev, ba, n);
@@ -408,9 +416,6 @@ struct dma_map_ops sbus_dma_ops = {
.sync_sg_for_device = sbus_sync_sg_for_device,
};
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
@@ -422,7 +427,9 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-#ifdef CONFIG_PCI
+
+/* LEON reuses PCI DMA ops */
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +437,8 @@ arch_initcall(sparc_register_ioport);
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp)
{
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
- unsigned long va;
+ unsigned long len_total = PAGE_ALIGN(len);
+ void *va;
struct resource *res;
int order;
@@ -443,34 +450,33 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- va = __get_free_pages(GFP_KERNEL, order);
- if (va == 0) {
+ va = (void *) __get_free_pages(GFP_KERNEL, order);
+ if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
- return NULL;
+ goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
- return NULL;
+ goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
- free_pages(va, order);
- kfree(res);
- return NULL;
+ goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
- (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
+
+err_nova:
+ kfree(res);
+err_nomem:
+ free_pages((unsigned long)va, order);
+err_nopages:
+ return NULL;
}
/* Free and unmap a consistent DMA buffer.
@@ -485,7 +491,6 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
dma_addr_t ba)
{
struct resource *res;
- unsigned long pgp;
if ((res = _sparc_find_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
@@ -498,21 +503,19 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
(long)((res->end-res->start)+1), (long)n);
return;
}
- pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
- mmu_inval_dma_area(pgp, n);
+ dma_make_coherent(ba, n);
sparc_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
-
- free_pages(pgp, get_order(n));
+ free_pages((unsigned long)phys_to_virt(ba), get_order(n));
}
/*
@@ -527,6 +530,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
return page_to_phys(page) + offset;
}
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (dir != PCI_DMA_TODEVICE)
+ dma_make_coherent(ba, PAGE_ALIGN(size));
+}
+
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scather-gather version of the
* above pci_map_single interface. Here the scatter gather list
@@ -551,8 +561,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- sg->dma_address = virt_to_phys(sg_virt(sg));
+ sg->dma_address = sg_phys(sg);
sg->dma_length = sg->length;
}
return nents;
@@ -571,10 +580,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
@@ -593,8 +599,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(ba, PAGE_ALIGN(size));
}
}
@@ -602,8 +607,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(ba, PAGE_ALIGN(size));
}
}
@@ -621,10 +625,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
@@ -637,10 +638,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
@@ -649,6 +647,7 @@ struct dma_map_ops pci32_dma_ops = {
.alloc_coherent = pci32_alloc_coherent,
.free_coherent = pci32_free_coherent,
.map_page = pci32_map_page,
+ .unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
.unmap_sg = pci32_unmap_sg,
.sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +657,16 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &pci32_dma_ops;
+#elif defined(CONFIG_SBUS)
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+EXPORT_SYMBOL(dma_ops);
+
/*
* Return whether the given PCI device DMA address mask can be
@@ -717,7 +725,7 @@ static const struct file_operations sparc_io_proc_fops = {
static struct resource *_sparc_find_resource(struct resource *root,
unsigned long hit)
{
- struct resource *tmp;
+ struct resource *tmp;
for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
if (tmp->start <= hit && tmp->end >= hit)
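Two small idioms recur throughout the ioport.c hunks: PAGE_ALIGN(len) replaces the open-coded (len + PAGE_SIZE-1) & PAGE_MASK rounding, and the error labels are reordered so the unwind path releases resources in the reverse order of allocation. A schematic sketch of that unwind pattern (hypothetical helper, simplified from the patched functions):

#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>

static void *alloc_dma_region(struct resource *root, size_t len)
{
	unsigned long len_total = PAGE_ALIGN(len);
	int order = get_order(len_total);
	struct resource *res;
	unsigned long va;

	va = __get_free_pages(GFP_KERNEL, order);
	if (!va)
		goto err_nopages;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		goto err_nomem;

	if (allocate_resource(root, res, len_total, root->start, root->end,
			      PAGE_SIZE, NULL, NULL) != 0)
		goto err_nova;

	return (void *)res->start;

err_nova:
	kfree(res);			/* undo kzalloc() */
err_nomem:
	free_pages(va, order);		/* undo __get_free_pages() */
err_nopages:
	return NULL;
}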
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index db7513881530..100b9c204e78 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,5 +1,62 @@
+#include <linux/platform_device.h>
+
#include <asm/btfixup.h>
+struct irq_bucket {
+ struct irq_bucket *next;
+ unsigned int real_irq;
+ unsigned int irq;
+ unsigned int pil;
+};
+
+#define SUN4D_MAX_BOARD 10
+#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5)
+
+/* Map between the irq identifier used in hw to the

+ * irq_bucket. The map is sufficiently large to hold
+ * the sun4d hw identifiers.
+ */
+extern struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
+
+
+/* sun4m specific type definitions */
+
+/* This maps direct to CPU specific interrupt registers */
+struct sun4m_irq_percpu {
+ u32 pending;
+ u32 clear;
+ u32 set;
+};
+
+/* This maps direct to global interrupt registers */
+struct sun4m_irq_global {
+ u32 pending;
+ u32 mask;
+ u32 mask_clear;
+ u32 mask_set;
+ u32 interrupt_target;
+};
+
+extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+extern struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+/*
+ * Platform specific irq configuration
+ * The individual platforms assign their platform
+ * specifics in their init functions.
+ */
+struct sparc_irq_config {
+ void (*init_timers)(irq_handler_t);
+ unsigned int (*build_device_irq)(struct platform_device *op,
+ unsigned int real_irq);
+};
+extern struct sparc_irq_config sparc_irq_config;
+
+unsigned int irq_alloc(unsigned int real_irq, unsigned int pil);
+void irq_link(unsigned int irq);
+void irq_unlink(unsigned int irq);
+void handler_irq(unsigned int pil, struct pt_regs *regs);
+
/* Dave Redman (djhr@tadpole.co.uk)
* changed these to function pointers.. it saves cycles and will allow
* the irq dependencies to be split into different files at a later date
@@ -8,33 +65,9 @@
* Changed these to btfixup entities... It saves cycles :)
*/
-BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
-BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
-BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
-BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
BTFIXUPDEF_CALL(void, clear_clock_irq, void)
BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
-static inline void __disable_irq(unsigned int irq)
-{
- BTFIXUP_CALL(disable_irq)(irq);
-}
-
-static inline void __enable_irq(unsigned int irq)
-{
- BTFIXUP_CALL(enable_irq)(irq);
-}
-
-static inline void disable_pil_irq(unsigned int irq)
-{
- BTFIXUP_CALL(disable_pil_irq)(irq);
-}
-
-static inline void enable_pil_irq(unsigned int irq)
-{
- BTFIXUP_CALL(enable_pil_irq)(irq);
-}
-
static inline void clear_clock_irq(void)
{
BTFIXUP_CALL(clear_clock_irq)();
@@ -45,12 +78,6 @@ static inline void load_profile_irq(int cpu, int limit)
BTFIXUP_CALL(load_profile_irq)(cpu, limit);
}
-extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
-
-extern void claim_ticker14(irq_handler_t irq_handler,
- int irq,
- unsigned int timeout);
-
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
@@ -59,4 +86,10 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
+
+/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
+#define SUN4D_IPI_IRQ 14
+
+extern void sun4d_ipi_interrupt(void);
+
#endif
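The interface declared here is filled in by each platform's init code: build_device_irq() typically calls irq_alloc() to obtain a virtual IRQ for a (real_irq, pil) pair and attaches a chip and flow handler to it, while irq_link() is called from the chip's startup path so that handler_irq() can find the bucket via irq_map[pil]. A hedged sketch of such a platform hook (the actual sun4m/sun4d/leon implementations in the files below differ in detail; all "foo" names are placeholders):

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include "irq.h"	/* sparc_irq_config, irq_alloc(), irq_link() */

static struct irq_chip foo_irq_chip;	/* mask/unmask ops omitted in this sketch */

static unsigned int foo_build_device_irq(struct platform_device *op,
					  unsigned int real_irq)
{
	unsigned int pil = real_irq & 0xf;	/* assumption: PIL kept in the low bits */
	unsigned int irq = irq_alloc(real_irq, pil);

	if (!irq)
		return 0;
	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
				      handle_level_irq, "level");
	return irq;
}

static void __init foo_init_IRQ(void)
{
	sparc_irq_config.build_device_irq = foo_build_device_irq;
}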
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 5ad6e5c5dbb3..9b89d842913c 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -1,8 +1,8 @@
/*
- * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
- * Sparc the IRQs are basically 'cast in stone'
- * and you are supposed to probe the prom's device
- * node trees to find out who's got which IRQ.
+ * Interrupt request handling routines. On the
+ * Sparc the IRQs are basically 'cast in stone'
+ * and you are supposed to probe the prom's device
+ * node trees to find out who's got which IRQ.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
@@ -11,40 +11,12 @@
* Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/pcic.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
+#include <asm/cpudata.h>
+#include <asm/pcic.h>
#include <asm/leon.h>
#include "kernel.h"
@@ -57,6 +29,10 @@
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
+
+/* platform specific irq setup */
+struct sparc_irq_config sparc_irq_config;
+
unsigned long arch_local_irq_save(void)
{
unsigned long retval;
@@ -126,309 +102,185 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* directed CPU interrupts using the existing enable/disable irq code
* with tweaks.
*
+ * Sun4d complicates things even further. IRQ numbers are arbitrary
+ * 32-bit values in that case. Since this is similar to sparc64,
+ * we adopt a virtual IRQ numbering scheme as is done there.
+ * Virtual interrupt numbers are allocated by build_irq(). So NR_IRQS
+ * just becomes a limit of how many interrupt sources we can handle in
+ * a single system. Even fully loaded SS2000 machines top off at
+ * about 32 interrupt sources or so, therefore a NR_IRQS value of 64
+ * is more than enough.
+ *
+ * We keep a map of per-PIL enable interrupts. These get wired
+ * up via the irq_chip->startup() method which gets invoked by
+ * the generic IRQ layer during request_irq().
*/
-static void irq_panic(void)
-{
- extern char *cputypval;
- prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
- prom_halt();
-}
-
-void (*sparc_init_timers)(irq_handler_t ) =
- (void (*)(irq_handler_t )) irq_panic;
-
-/*
- * Dave Redman (djhr@tadpole.co.uk)
- *
- * There used to be extern calls and hard coded values here.. very sucky!
- * instead, because some of the devices attach very early, I do something
- * equally sucky but at least we'll never try to free statically allocated
- * space or call kmalloc before kmalloc_init :(.
- *
- * In fact it's the timer10 that attaches first.. then timer14
- * then kmalloc_init is called.. then the tty interrupts attach.
- * hmmm....
- *
- */
-#define MAX_STATIC_ALLOC 4
-struct irqaction static_irqaction[MAX_STATIC_ALLOC];
-int static_irq_count;
-static struct {
- struct irqaction *action;
- int flags;
-} sparc_irq[NR_IRQS];
-#define SPARC_IRQ_INPROGRESS 1
+/* Table of allocated irqs. Unused entries has irq == 0 */
+static struct irq_bucket irq_table[NR_IRQS];
+/* Protect access to irq_table */
+static DEFINE_SPINLOCK(irq_table_lock);
-/* Used to protect the IRQ action lists */
-DEFINE_SPINLOCK(irq_action_lock);
+/* Map between the irq identifier used in hw to the irq_bucket. */
+struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
+/* Protect access to irq_map */
+static DEFINE_SPINLOCK(irq_map_lock);
-int show_interrupts(struct seq_file *p, void *v)
+/* Allocate a new irq from the irq_table */
+unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
{
- int i = *(loff_t *) v;
- struct irqaction * action;
unsigned long flags;
-#ifdef CONFIG_SMP
- int j;
-#endif
+ unsigned int i;
- if (sparc_cpu_model == sun4d) {
- extern int show_sun4d_interrupts(struct seq_file *, void *);
-
- return show_sun4d_interrupts(p, v);
+ spin_lock_irqsave(&irq_table_lock, flags);
+ for (i = 1; i < NR_IRQS; i++) {
+ if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
+ goto found;
}
- spin_lock_irqsave(&irq_action_lock, flags);
+
+ for (i = 1; i < NR_IRQS; i++) {
+ if (!irq_table[i].irq)
+ break;
+ }
+
if (i < NR_IRQS) {
- action = sparc_irq[i].action;
- if (!action)
- goto out_unlock;
- seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j) {
- seq_printf(p, "%10u ",
- kstat_cpu(j).irqs[i]);
- }
-#endif
- seq_printf(p, " %c %s",
- (action->flags & IRQF_DISABLED) ? '+' : ' ',
- action->name);
- for (action=action->next; action; action = action->next) {
- seq_printf(p, ",%s %s",
- (action->flags & IRQF_DISABLED) ? " +" : "",
- action->name);
- }
- seq_putc(p, '\n');
+ irq_table[i].real_irq = real_irq;
+ irq_table[i].irq = i;
+ irq_table[i].pil = pil;
+ } else {
+ printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+ i = 0;
}
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
- return 0;
+found:
+ spin_unlock_irqrestore(&irq_table_lock, flags);
+
+ return i;
}
-void free_irq(unsigned int irq, void *dev_id)
+/* Based on a single pil handler_irq may need to call several
+ * interrupt handlers. Use irq_map as entry to irq_table,
+ * and let each entry in irq_table point to the next entry.
+ */
+void irq_link(unsigned int irq)
{
- struct irqaction * action;
- struct irqaction **actionp;
- unsigned long flags;
- unsigned int cpu_irq;
-
- if (sparc_cpu_model == sun4d) {
- extern void sun4d_free_irq(unsigned int, void *);
-
- sun4d_free_irq(irq, dev_id);
- return;
- }
- cpu_irq = irq & (NR_IRQS - 1);
- if (cpu_irq > 14) { /* 14 irq levels on the sparc */
- printk("Trying to free bogus IRQ %d\n", irq);
- return;
- }
-
- spin_lock_irqsave(&irq_action_lock, flags);
+ struct irq_bucket *p;
+ unsigned long flags;
+ unsigned int pil;
- actionp = &sparc_irq[cpu_irq].action;
- action = *actionp;
+ BUG_ON(irq >= NR_IRQS);
- if (!action->handler) {
- printk("Trying to free free IRQ%d\n",irq);
- goto out_unlock;
- }
- if (dev_id) {
- for (; action; action = action->next) {
- if (action->dev_id == dev_id)
- break;
- actionp = &action->next;
- }
- if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
- goto out_unlock;
- }
- } else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
- goto out_unlock;
- }
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
- * so it is a bad idea to free it.
- */
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
- irq, action->name);
- goto out_unlock;
- }
+ spin_lock_irqsave(&irq_map_lock, flags);
- *actionp = action->next;
+ p = &irq_table[irq];
+ pil = p->pil;
+ BUG_ON(pil > SUN4D_MAX_IRQ);
+ p->next = irq_map[pil];
+ irq_map[pil] = p;
- spin_unlock_irqrestore(&irq_action_lock, flags);
+ spin_unlock_irqrestore(&irq_map_lock, flags);
+}
- synchronize_irq(irq);
+void irq_unlink(unsigned int irq)
+{
+ struct irq_bucket *p, **pnext;
+ unsigned long flags;
- spin_lock_irqsave(&irq_action_lock, flags);
+ BUG_ON(irq >= NR_IRQS);
- kfree(action);
+ spin_lock_irqsave(&irq_map_lock, flags);
- if (!sparc_irq[cpu_irq].action)
- __disable_irq(irq);
+ p = &irq_table[irq];
+ BUG_ON(p->pil > SUN4D_MAX_IRQ);
+ pnext = &irq_map[p->pil];
+ while (*pnext != p)
+ pnext = &(*pnext)->next;
+ *pnext = p->next;
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
+ spin_unlock_irqrestore(&irq_map_lock, flags);
}
-EXPORT_SYMBOL(free_irq);
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
+/* /proc/interrupts printing */
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- unsigned int cpu_irq;
-
- cpu_irq = irq & (NR_IRQS - 1);
- while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
- cpu_relax();
-}
-EXPORT_SYMBOL(synchronize_irq);
-#endif /* SMP */
+ int j;
-void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
-{
- int i;
- struct irqaction * action;
- unsigned int cpu_irq;
-
- cpu_irq = irq & (NR_IRQS - 1);
- action = sparc_irq[cpu_irq].action;
-
- printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
- regs->npc, regs->u_regs[14]);
- if (action) {
- printk("Expecting: ");
- for (i = 0; i < 16; i++)
- if (action->handler)
- printk("[%s:%d:0x%x] ", action->name,
- (int) i, (unsigned int) action->handler);
- }
- printk("AIEEE\n");
- panic("bogus interrupt received");
+#ifdef CONFIG_SMP
+ seq_printf(p, "RES: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+ seq_printf(p, " IPI rescheduling interrupts\n");
+ seq_printf(p, "CAL: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+ seq_printf(p, " IPI function call interrupts\n");
+#endif
+ seq_printf(p, "NMI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_data(j).counter);
+ seq_printf(p, " Non-maskable interrupts\n");
+ return 0;
}
-void handler_irq(int irq, struct pt_regs * regs)
+void handler_irq(unsigned int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
- int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- extern void smp4m_irq_rotate(int cpu);
-#endif
+ struct irq_bucket *p;
+ BUG_ON(pil > 15);
old_regs = set_irq_regs(regs);
irq_enter();
- disable_pil_irq(irq);
-#ifdef CONFIG_SMP
- /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
- if((sparc_cpu_model==sun4m) && (irq < 10))
- smp4m_irq_rotate(cpu);
-#endif
- action = sparc_irq[irq].action;
- sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
- kstat_cpu(cpu).irqs[irq]++;
- do {
- if (!action || !action->handler)
- unexpected_irq(irq, NULL, regs);
- action->handler(irq, action->dev_id);
- action = action->next;
- } while (action);
- sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
- enable_pil_irq(irq);
+
+ p = irq_map[pil];
+ while (p) {
+ struct irq_bucket *next = p->next;
+
+ generic_handle_irq(p->irq);
+ p = next;
+ }
irq_exit();
set_irq_regs(old_regs);
}
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
+static unsigned int floppy_irq;
-/* Fast IRQs on the Sparc can only have one routine attached to them,
- * thus no sharing possible.
- */
-static int request_fast_irq(unsigned int irq,
- void (*handler)(void),
- unsigned long irqflags, const char *devname)
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
{
- struct irqaction *action;
- unsigned long flags;
unsigned int cpu_irq;
- int ret;
+ int err;
+
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
- extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
-
- cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
- ret = -EINVAL;
- goto out;
- }
- if(!handler) {
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&irq_action_lock, flags);
- action = sparc_irq[cpu_irq].action;
- if(action) {
- if(action->flags & IRQF_SHARED)
- panic("Trying to register fast irq when already shared.\n");
- if(irqflags & IRQF_SHARED)
- panic("Trying to register fast irq as shared.\n");
+ err = request_irq(irq, irq_handler, 0, "floppy", NULL);
+ if (err)
+ return -1;
- /* Anyway, someone already owns it so cannot be made fast. */
- printk("request_fast_irq: Trying to register yet already owned.\n");
- ret = -EBUSY;
- goto out_unlock;
- }
+ /* Save for later use in floppy interrupt handler */
+ floppy_irq = irq;
- /* If this is flagged as statically allocated then we use our
- * private struct which is never freed.
- */
- if (irqflags & SA_STATIC_ALLOC) {
- if (static_irq_count < MAX_STATIC_ALLOC)
- action = &static_irqaction[static_irq_count++];
- else
- printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
- irq, devname);
- }
-
- if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ cpu_irq = (irq & (NR_IRQS - 1));
/* Dork with trap table if we get this far. */
#define INSTANTIATE(table) \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
- SPARC_BRANCH((unsigned long) handler, \
+ SPARC_BRANCH((unsigned long) floppy_hardint, \
(unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu1;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3;
+ INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
/*
@@ -437,24 +289,12 @@ static int request_fast_irq(unsigned int irq,
* writing we have no CPU-neutral interface to fine-grained flushes.
*/
flush_cache_all();
-
- action->flags = irqflags;
- action->name = devname;
- action->dev_id = NULL;
- action->next = NULL;
-
- sparc_irq[cpu_irq].action = action;
-
- __enable_irq(irq);
-
- ret = 0;
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
- return ret;
+ return 0;
}
+EXPORT_SYMBOL(sparc_floppy_request_irq);
-/* These variables are used to access state from the assembler
+/*
+ * These variables are used to access state from the assembler
* interrupt handler, floppy_hardint, so we cannot put these in
* the floppy driver image because that would not work in the
* modular case.
@@ -477,155 +317,23 @@ EXPORT_SYMBOL(pdma_base);
unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);
-extern void floppy_hardint(void);
-
-static irq_handler_t floppy_irq_handler;
-
+/* Use the generic irq support to call floppy_interrupt
+ * which was setup using request_irq() in sparc_floppy_request_irq().
+ * We only have one floppy interrupt so we do not need to check
+ * for additional handlers being wired up by irq_link()
+ */
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- int cpu = smp_processor_id();
old_regs = set_irq_regs(regs);
- disable_pil_irq(irq);
irq_enter();
- kstat_cpu(cpu).irqs[irq]++;
- floppy_irq_handler(irq, dev_id);
+ generic_handle_irq(floppy_irq);
irq_exit();
- enable_pil_irq(irq);
set_irq_regs(old_regs);
- // XXX Eek, it's totally changed with preempt_count() and such
- // if (softirq_pending(cpu))
- // do_softirq();
}
-
-int sparc_floppy_request_irq(int irq, unsigned long flags,
- irq_handler_t irq_handler)
-{
- floppy_irq_handler = irq_handler;
- return request_fast_irq(irq, floppy_hardint, flags, "floppy");
-}
-EXPORT_SYMBOL(sparc_floppy_request_irq);
-
#endif
-int request_irq(unsigned int irq,
- irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
-{
- struct irqaction * action, **actionp;
- unsigned long flags;
- unsigned int cpu_irq;
- int ret;
-
- if (sparc_cpu_model == sun4d) {
- extern int sun4d_request_irq(unsigned int,
- irq_handler_t ,
- unsigned long, const char *, void *);
- return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
- }
- cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
- ret = -EINVAL;
- goto out;
- }
- if (!handler) {
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&irq_action_lock, flags);
-
- actionp = &sparc_irq[cpu_irq].action;
- action = *actionp;
- if (action) {
- if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
- ret = -EBUSY;
- goto out_unlock;
- }
- if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
- ret = -EBUSY;
- goto out_unlock;
- }
- for ( ; action; action = *actionp)
- actionp = &action->next;
- }
-
- /* If this is flagged as statically allocated then we use our
- * private struct which is never freed.
- */
- if (irqflags & SA_STATIC_ALLOC) {
- if (static_irq_count < MAX_STATIC_ALLOC)
- action = &static_irqaction[static_irq_count++];
- else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
- }
-
- if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- action->handler = handler;
- action->flags = irqflags;
- action->name = devname;
- action->next = NULL;
- action->dev_id = dev_id;
-
- *actionp = action;
-
- __enable_irq(irq);
-
- ret = 0;
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-void disable_irq_nosync(unsigned int irq)
-{
- __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq_nosync);
-
-void disable_irq(unsigned int irq)
-{
- __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq);
-
-void enable_irq(unsigned int irq)
-{
- __enable_irq(irq);
-}
-
-EXPORT_SYMBOL(enable_irq);
-
-/* We really don't need these at all on the Sparc. We only have
- * stubs here because they are exported to modules.
- */
-unsigned long probe_irq_on(void)
-{
- return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-int probe_irq_off(unsigned long mask)
-{
- return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
/* djhr
* This could probably be made indirect too and assigned in the CPU
* bits of the code. That would be much nicer I think and would also
@@ -636,27 +344,20 @@ EXPORT_SYMBOL(probe_irq_off);
void __init init_IRQ(void)
{
- extern void sun4c_init_IRQ( void );
- extern void sun4m_init_IRQ( void );
- extern void sun4d_init_IRQ( void );
-
- switch(sparc_cpu_model) {
+ switch (sparc_cpu_model) {
case sun4c:
case sun4:
sun4c_init_IRQ();
break;
case sun4m:
-#ifdef CONFIG_PCI
pcic_probe();
- if (pcic_present()) {
+ if (pcic_present())
sun4m_pci_init_IRQ();
- break;
- }
-#endif
- sun4m_init_IRQ();
+ else
+ sun4m_init_IRQ();
break;
-
+
case sun4d:
sun4d_init_IRQ();
break;
@@ -672,9 +373,3 @@ void __init init_IRQ(void)
btfixup();
}
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
- /* For now, nothing... */
-}
-#endif /* CONFIG_PROC_FS */
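
The hunks above strip the sparc32-private request_irq()/enable_irq()/disable_irq() implementations and the floppy fast-IRQ hook, leaving interrupt registration to the generic IRQ core. A minimal consumer-side sketch of that generic path follows; the handler name, IRQ number and device cookie are illustrative and not taken from this patch:

#include <linux/interrupt.h>

/* illustrative sketch, not part of this patch */
static irqreturn_t example_isr(int irq, void *dev_id)
{
        /* acknowledge the device, then tell the core it was handled */
        return IRQ_HANDLED;
}

static int example_attach(unsigned int example_irq, void *example_dev)
{
        /* the generic core now allocates and chains the irqaction */
        return request_irq(example_irq, example_isr, 0,
                           "example", example_dev);
}
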
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 830d70a3e20b..4e78862d12fd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
"i" (ASI_PHYS_USE_EC));
}
-static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
unsigned int ret;
@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
-static void bucket_set_virt_irq(unsigned long bucket_pa,
- unsigned int virt_irq)
+static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
- : "r" (virt_irq),
+ : "r" (irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
}
@@ -114,97 +113,63 @@ static struct {
unsigned int dev_handle;
unsigned int dev_ino;
unsigned int in_use;
-} virt_irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(virt_irq_alloc_lock);
+} irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(irq_alloc_lock);
-unsigned char virt_irq_alloc(unsigned int dev_handle,
- unsigned int dev_ino)
+unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_irq_table[ent].in_use)
+ if (!irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
ent = 0;
} else {
- virt_irq_table[ent].dev_handle = dev_handle;
- virt_irq_table[ent].dev_ino = dev_ino;
- virt_irq_table[ent].in_use = 1;
+ irq_table[ent].dev_handle = dev_handle;
+ irq_table[ent].dev_ino = dev_ino;
+ irq_table[ent].in_use = 1;
}
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
return ent;
}
#ifdef CONFIG_PCI_MSI
-void virt_irq_free(unsigned int virt_irq)
+void irq_free(unsigned int irq)
{
unsigned long flags;
- if (virt_irq >= NR_IRQS)
+ if (irq >= NR_IRQS)
return;
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
- virt_irq_table[virt_irq].in_use = 0;
+ irq_table[irq].in_use = 0;
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
/*
* /proc/interrupts printing:
*/
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
+ int j;
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
- seq_printf(p, " %9s", irq_desc[i].chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_printf(p, "NMI: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
- seq_printf(p, " Non-maskable interrupts\n");
- }
+ seq_printf(p, "NMI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+ seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
@@ -253,39 +218,38 @@ struct irq_handler_data {
};
#ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
+static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, affinity);
- if (cpus_equal(mask, cpu_online_map)) {
- cpuid = map_to_cpu(virt_irq);
+ if (cpumask_equal(&mask, cpu_online_mask)) {
+ cpuid = map_to_cpu(irq);
} else {
cpumask_t tmp;
- cpus_and(tmp, cpu_online_map, mask);
- cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
+ cpumask_and(&tmp, cpu_online_mask, &mask);
+ cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
}
return cpuid;
}
#else
-#define irq_choose_cpu(virt_irq, affinity) \
+#define irq_choose_cpu(irq, affinity) \
real_hard_smp_processor_id()
#endif
-static void sun4u_irq_enable(unsigned int virt_irq)
+static void sun4u_irq_enable(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -294,21 +258,21 @@ static void sun4u_irq_enable(unsigned int virt_irq)
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
}
-static int sun4u_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4u_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq, mask);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, mask);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -317,7 +281,7 @@ static int sun4u_set_affinity(unsigned int virt_irq,
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
@@ -340,27 +304,22 @@ static int sun4u_set_affinity(unsigned int virt_irq,
* sees that, it also hooks up a default ->shutdown method which
* invokes ->mask() which we do not want. See irq_chip_set_defaults().
*/
-static void sun4u_irq_disable(unsigned int virt_irq)
+static void sun4u_irq_disable(struct irq_data *data)
{
}
-static void sun4u_irq_eoi(unsigned int virt_irq)
+static void sun4u_irq_eoi(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = data->handler_data;
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
-
- if (likely(data))
- upa_writeq(ICLR_IDLE, data->iclr);
+ if (likely(handler_data))
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
-static void sun4v_irq_enable(unsigned int virt_irq)
+static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -377,11 +336,11 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}
-static int sun4v_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -392,9 +351,9 @@ static int sun4v_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_irq_disable(unsigned int virt_irq)
+static void sun4v_irq_disable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[data->irq].dev_ino;
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -403,30 +362,26 @@ static void sun4v_irq_disable(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
-static void sun4v_irq_eoi(unsigned int virt_irq)
+static void sun4v_irq_eoi(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- struct irq_desc *desc = irq_desc + virt_irq;
+ unsigned int ino = irq_table[data->irq].dev_ino;
int err;
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
-
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
-static void sun4v_virq_enable(unsigned int virt_irq)
+static void sun4v_virq_enable(struct irq_data *data)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -447,16 +402,16 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static int sun4v_virt_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_virt_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, mask);
+ cpuid = irq_choose_cpu(data->irq, mask);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -467,13 +422,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_virq_disable(unsigned int virt_irq)
+static void sun4v_virq_disable(struct irq_data *data)
{
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
@@ -483,17 +438,13 @@ static void sun4v_virq_disable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static void sun4v_virq_eoi(unsigned int virt_irq)
+static void sun4v_virq_eoi(struct irq_data *data)
{
- struct irq_desc *desc = irq_desc + virt_irq;
unsigned long dev_handle, dev_ino;
int err;
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
-
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
@@ -504,132 +455,128 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
}
static struct irq_chip sun4u_irq = {
- .name = "sun4u",
- .enable = sun4u_irq_enable,
- .disable = sun4u_irq_disable,
- .eoi = sun4u_irq_eoi,
- .set_affinity = sun4u_set_affinity,
+ .name = "sun4u",
+ .irq_enable = sun4u_irq_enable,
+ .irq_disable = sun4u_irq_disable,
+ .irq_eoi = sun4u_irq_eoi,
+ .irq_set_affinity = sun4u_set_affinity,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_irq = {
- .name = "sun4v",
- .enable = sun4v_irq_enable,
- .disable = sun4v_irq_disable,
- .eoi = sun4v_irq_eoi,
- .set_affinity = sun4v_set_affinity,
+ .name = "sun4v",
+ .irq_enable = sun4v_irq_enable,
+ .irq_disable = sun4v_irq_disable,
+ .irq_eoi = sun4v_irq_eoi,
+ .irq_set_affinity = sun4v_set_affinity,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_virq = {
- .name = "vsun4v",
- .enable = sun4v_virq_enable,
- .disable = sun4v_virq_disable,
- .eoi = sun4v_virq_eoi,
- .set_affinity = sun4v_virt_set_affinity,
+ .name = "vsun4v",
+ .irq_enable = sun4v_virq_enable,
+ .irq_disable = sun4v_virq_disable,
+ .irq_eoi = sun4v_virq_eoi,
+ .irq_set_affinity = sun4v_virt_set_affinity,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
-static void pre_flow_handler(unsigned int virt_irq,
- struct irq_desc *desc)
+static void pre_flow_handler(struct irq_data *d)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+ unsigned int ino = irq_table[d->irq].dev_ino;
- data->pre_handler(ino, data->arg1, data->arg2);
-
- handle_fasteoi_irq(virt_irq, desc);
+ handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}
-void irq_install_pre_handler(int virt_irq,
+void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = irq_get_handler_data(irq);
- data->pre_handler = func;
- data->arg1 = arg1;
- data->arg2 = arg2;
+ handler_data->pre_handler = func;
+ handler_data->arg1 = arg1;
+ handler_data->arg2 = arg2;
- desc->handle_irq = pre_flow_handler;
+ __irq_set_preflow_handler(irq, pre_flow_handler);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, ino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq,
- &sun4u_irq,
- handle_fasteoi_irq,
- "IVEC");
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, ino);
+ bucket_set_irq(__pa(bucket), irq);
+ irq_set_chip_and_handler_name(irq, &sun4u_irq,
+ handle_fasteoi_irq, "IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = irq_get_handler_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ irq_set_handler_data(irq, handler_data);
- data->imap = imap;
- data->iclr = iclr;
+ handler_data->imap = imap;
+ handler_data->iclr = iclr;
out:
- return virt_irq;
+ return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
struct irq_chip *chip)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, sysino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq, chip,
- handle_fasteoi_irq,
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, sysino);
+ bucket_set_irq(__pa(bucket), irq);
+ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
"IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = irq_get_handler_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ irq_set_handler_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
out:
- return virt_irq;
+ return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -641,11 +588,10 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
- struct irq_handler_data *data;
+ struct irq_handler_data *handler_data;
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
- struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
@@ -662,32 +608,29 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
((unsigned long) bucket +
sizeof(struct ino_bucket)));
- virt_irq = virt_irq_alloc(devhandle, devino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
+ irq = irq_alloc(devhandle, devino);
+ bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
- handle_fasteoi_irq,
+ irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
"IVEC");
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data))
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data))
return 0;
/* In order to make the LDC channel startup sequence easier,
* especially wrt. locking, we do not let request_irq() enable
* the interrupt.
*/
- desc = irq_desc + virt_irq;
- desc->status |= IRQ_NOAUTOEN;
-
- set_irq_chip_data(virt_irq, data);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ irq_set_handler_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
cookie = ~__pa(bucket);
hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
@@ -697,30 +640,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
- return virt_irq;
+ return irq;
}
-void ack_bad_irq(unsigned int virt_irq)
+void ack_bad_irq(unsigned int irq)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[irq].dev_ino;
if (!ino)
ino = 0xdeadbeef;
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
- ino, virt_irq);
+ printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+ ino, irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-void __irq_entry handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
void *orig_sp;
- clear_softint(1 << irq);
+ clear_softint(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
@@ -739,18 +682,14 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs)
orig_sp = set_hardirq_stack();
while (bucket_pa) {
- struct irq_desc *desc;
unsigned long next_pa;
- unsigned int virt_irq;
+ unsigned int irq;
next_pa = bucket_get_chain_pa(bucket_pa);
- virt_irq = bucket_get_virt_irq(bucket_pa);
+ irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
- desc = irq_desc + virt_irq;
-
- if (!(desc->status & IRQ_DISABLED))
- desc->handle_irq(virt_irq, desc);
+ generic_handle_irq(irq);
bucket_pa = next_pa;
}
@@ -793,16 +732,18 @@ void fixup_irqs(void)
unsigned int irq;
for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned long flags;
- raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
- if (irq_desc[irq].action &&
- !(irq_desc[irq].status & IRQ_PER_CPU)) {
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq,
- irq_desc[irq].affinity);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->action && !irqd_is_per_cpu(data)) {
+ if (data->chip->irq_set_affinity)
+ data->chip->irq_set_affinity(data,
+ data->affinity,
+ false);
}
- raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
tick_ops->disable_irq();
@@ -1040,5 +981,5 @@ void __init init_IRQ(void)
: "i" (PSTATE_IE)
: "g1");
- irq_desc[0].action = &timer_irq_action;
+ irq_to_desc(0)->action = &timer_irq_action;
}
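
sun4v_build_virq() above flags the virtual interrupt with IRQ_NOAUTOEN so that request_irq() leaves it disabled until the LDC channel is ready. A hedged sketch of the consumer side of that pattern, with the handler and channel pointer purely illustrative:

/* illustrative sketch, not part of this patch */
static int ldc_example_attach(u32 devhandle, unsigned int devino,
                              irq_handler_t handler, void *chan)
{
        unsigned int irq = sun4v_build_virq(devhandle, devino);
        int err;

        if (!irq)
                return -ENOMEM;

        /* IRQ_NOAUTOEN keeps the line off across request_irq() ... */
        err = request_irq(irq, handler, 0, "LDC example", chan);
        if (err)
                return err;

        /* ... so it is enabled explicitly once the channel is set up */
        enable_irq(irq);
        return 0;
}
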
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 15d8a3f645c9..6f6544cfa0ef 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -3,12 +3,12 @@
#include <linux/interrupt.h>
+#include <asm/traps.h>
+
/* cpu.c */
-extern const char *sparc_cpu_type;
extern const char *sparc_pmu_type;
-extern const char *sparc_fpu_type;
-
extern unsigned int fsr_storage;
+extern int ncpus_probed;
#ifdef CONFIG_SPARC32
/* cpu.c */
@@ -26,6 +26,54 @@ extern int static_irq_count;
extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+extern void init_IRQ(void);
+
+/* sun4c_irq.c */
+extern void sun4c_init_IRQ(void);
+
+/* sun4m_irq.c */
+extern unsigned int lvl14_resolution;
+
+extern void sun4m_init_IRQ(void);
+extern void sun4m_unmask_profile_irq(void);
+extern void sun4m_clear_profile_irq(int cpu);
+
+/* sun4d_irq.c */
+extern spinlock_t sun4d_imsk_lock;
+
+extern void sun4d_init_IRQ(void);
+extern int sun4d_request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id);
+extern int show_sun4d_interrupts(struct seq_file *, void *);
+extern void sun4d_distribute_irqs(void);
+extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+
+/* head_32.S */
+extern unsigned int t_nmi[];
+extern unsigned int linux_trap_ipi15_sun4d[];
+extern unsigned int linux_trap_ipi15_sun4m[];
+
+extern struct tt_entry trapbase_cpu1;
+extern struct tt_entry trapbase_cpu2;
+extern struct tt_entry trapbase_cpu3;
+
+extern char cputypval[];
+
+/* entry.S */
+extern unsigned long lvl14_save[4];
+extern unsigned int real_irq_entry[];
+extern unsigned int smp4d_ticker[];
+extern unsigned int patchme_maybe_smp_msg[];
+
+extern void floppy_hardint(void);
+
+/* trampoline_32.S */
+extern int __smp4m_processor_id(void);
+extern int __smp4d_processor_id(void);
+extern unsigned long sun4m_cpu_startup;
+extern unsigned long sun4d_cpu_startup;
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index df39a0f0d27a..732b0bce6001 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask)
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long orig_state, hv_err, flags;
+ unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_rx_get_state(lp->id,
- &lp->rx_head,
- &lp->rx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_rx_get_state(lp->id,
+ &lp->rx_head,
+ &lp->rx_tail,
+ &lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
@@ -904,16 +908,20 @@ out:
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long flags, hv_err, orig_state;
+ unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_tx_get_state(lp->id,
- &lp->tx_head,
- &lp->tx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_tx_get_state(lp->id,
+ &lp->tx_head,
+ &lp->tx_tail,
+ &lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
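
The ldc_rx()/ldc_tx() hunks drop the unused hv_err variable and note that hypervisor errors should probably be checked. One possible shape of such a check is sketched below; only HV_EOK and the sun4v_ldc_rx_get_state() call come from the surrounding code, and the reset helper is hypothetical:

/* illustrative sketch, not part of this patch */
static int ldc_example_get_rx_state(struct ldc_channel *lp)
{
        unsigned long hv_err;

        hv_err = sun4v_ldc_rx_get_state(lp->id, &lp->rx_head,
                                        &lp->rx_tail, &lp->chan_state);
        if (hv_err != HV_EOK) {
                /* ldc_example_reset() is a hypothetical recovery path */
                ldc_example_reset(lp);
                return -EIO;
        }
        return 0;
}
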
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index fdab7f854f80..2f538ac2e139 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -19,52 +19,70 @@
#include <asm/leon_amba.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/setup.h>
#include "prom.h"
#include "irq.h"
struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */
-struct amba_apb_device leon_percpu_timer_dev[16];
int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
+unsigned long amba_system_id;
+static DEFINE_SPINLOCK(leon_irq_lock);
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
+int leon3_ticker_irq; /* Timer ticker IRQ */
unsigned int sparc_leon_eirq;
-#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
+#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
+#define LEON_IACK (&leon3_irqctrl_regs->iclear)
+#define LEON_DO_ACK_HW 1
-/* Return the IRQ of the pending IRQ on the extended IRQ controller */
-int sparc_leon_eirq_get(int eirq, int cpu)
+/* Return the last IRQ ACKed by the Extended IRQ controller. It has already
+ * been (automatically) ACKed when the CPU takes the trap.
+ */
+static inline unsigned int leon_eirq_get(int cpu)
{
return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}
-irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id)
+/* Handle one or multiple IRQs from the extended interrupt controller */
+static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
- printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n");
- return IRQ_HANDLED;
+ unsigned int eirq;
+ int cpu = sparc_leon3_cpuid();
+
+ eirq = leon_eirq_get(cpu);
+ if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
+ generic_handle_irq(irq_map[eirq]->irq);
}
/* The extended IRQ controller has been found, this function registers it */
-void sparc_leon_eirq_register(int eirq)
+void leon_eirq_setup(unsigned int eirq)
{
- int irq;
+ unsigned long mask, oldmask;
+ unsigned int veirq;
- /* Register a "BAD" handler for this interrupt, it should never happen */
- irq = request_irq(eirq, sparc_leon_eirq_isr,
- (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
-
- if (irq) {
- printk(KERN_ERR
- "sparc_leon_eirq_register: unable to attach IRQ%d\n",
- eirq);
- } else {
- sparc_leon_eirq = eirq;
+ if (eirq < 1 || eirq > 0xf) {
+ printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
+ return;
}
+ veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);
+
+ /*
+ * Unmask the Extended IRQ; the IRQs routed through the Ext-IRQ
+ * controller have a mask-bit of their own, so this is safe.
+ */
+ irq_link(veirq);
+ mask = 1 << eirq;
+ oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
+ LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
+ sparc_leon_eirq = eirq;
}
static inline unsigned long get_irqmask(unsigned int irq)
@@ -82,45 +100,167 @@ static inline unsigned long get_irqmask(unsigned int irq)
return mask;
}
-static void leon_enable_irq(unsigned int irq_nr)
+#ifdef CONFIG_SMP
+static int irq_choose_cpu(const struct cpumask *affinity)
{
- unsigned long mask, flags;
- mask = get_irqmask(irq_nr);
- local_irq_save(flags);
- LEON3_BYPASS_STORE_PA(LEON_IMASK,
- (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask)));
- local_irq_restore(flags);
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, *affinity);
+ if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+ return boot_cpu_id;
+ else
+ return first_cpu(mask);
}
+#else
+#define irq_choose_cpu(affinity) boot_cpu_id
+#endif
-static void leon_disable_irq(unsigned int irq_nr)
+static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
+ bool force)
{
- unsigned long mask, flags;
- mask = get_irqmask(irq_nr);
- local_irq_save(flags);
- LEON3_BYPASS_STORE_PA(LEON_IMASK,
- (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask)));
- local_irq_restore(flags);
+ unsigned long mask, oldmask, flags;
+ int oldcpu, newcpu;
+
+ mask = (unsigned long)data->chip_data;
+ oldcpu = irq_choose_cpu(data->affinity);
+ newcpu = irq_choose_cpu(dest);
+
+ if (oldcpu == newcpu)
+ goto out;
+
+ /* mask the IRQ on the old CPU first, then unmask it on the selected CPU */
+ spin_lock_irqsave(&leon_irq_lock, flags);
+ oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
+ LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
+ oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
+ LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
+ spin_unlock_irqrestore(&leon_irq_lock, flags);
+out:
+ return IRQ_SET_MASK_OK;
+}
+
+static void leon_unmask_irq(struct irq_data *data)
+{
+ unsigned long mask, oldmask, flags;
+ int cpu;
+
+ mask = (unsigned long)data->chip_data;
+ cpu = irq_choose_cpu(data->affinity);
+ spin_lock_irqsave(&leon_irq_lock, flags);
+ oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
+ LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
+ spin_unlock_irqrestore(&leon_irq_lock, flags);
+}
+static void leon_mask_irq(struct irq_data *data)
+{
+ unsigned long mask, oldmask, flags;
+ int cpu;
+
+ mask = (unsigned long)data->chip_data;
+ cpu = irq_choose_cpu(data->affinity);
+ spin_lock_irqsave(&leon_irq_lock, flags);
+ oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
+ LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
+ spin_unlock_irqrestore(&leon_irq_lock, flags);
+}
+
+static unsigned int leon_startup_irq(struct irq_data *data)
+{
+ irq_link(data->irq);
+ leon_unmask_irq(data);
+ return 0;
+}
+
+static void leon_shutdown_irq(struct irq_data *data)
+{
+ leon_mask_irq(data);
+ irq_unlink(data->irq);
+}
+
+/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
+static void leon_eoi_irq(struct irq_data *data)
+{
+ unsigned long mask = (unsigned long)data->chip_data;
+
+ if (mask & LEON_DO_ACK_HW)
+ LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
+}
+
+static struct irq_chip leon_irq = {
+ .name = "leon",
+ .irq_startup = leon_startup_irq,
+ .irq_shutdown = leon_shutdown_irq,
+ .irq_mask = leon_mask_irq,
+ .irq_unmask = leon_unmask_irq,
+ .irq_eoi = leon_eoi_irq,
+ .irq_set_affinity = leon_set_affinity,
+};
+
+/*
+ * Build a LEON IRQ for the edge triggered LEON IRQ controller:
+ * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack
+ * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR
+ * Per-CPU Edge - handle_percpu_irq, ack=0
+ */
+unsigned int leon_build_device_irq(unsigned int real_irq,
+ irq_flow_handler_t flow_handler,
+ const char *name, int do_ack)
+{
+ unsigned int irq;
+ unsigned long mask;
+
+ irq = 0;
+ mask = get_irqmask(real_irq);
+ if (mask == 0)
+ goto out;
+
+ irq = irq_alloc(real_irq, real_irq);
+ if (irq == 0)
+ goto out;
+
+ if (do_ack)
+ mask |= LEON_DO_ACK_HW;
+
+ irq_set_chip_and_handler_name(irq, &leon_irq,
+ flow_handler, name);
+ irq_set_chip_data(irq, (void *)mask);
+
+out:
+ return irq;
+}
+
+static unsigned int _leon_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}
void __init leon_init_timers(irq_handler_t counter_fn)
{
- int irq;
+ int irq, eirq;
struct device_node *rootnp, *np, *nnp;
struct property *pp;
int len;
- int cpu, icsel;
+ int icsel;
int ampopts;
+ int err;
leondebug_irq_disable = 0;
leon_debug_irqout = 0;
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
- /*Find IRQMP IRQ Controller Registers base address otherwise bail out.*/
rootnp = of_find_node_by_path("/ambapp0");
if (!rootnp)
goto bad;
+
+ /* Find System ID: GRLIB build ID and optional CHIP ID */
+ pp = of_find_property(rootnp, "systemid", &len);
+ if (pp)
+ amba_system_id = *(unsigned long *)pp->value;
+
+ /* Find IRQMP IRQ controller registers base address, otherwise bail out */
np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
if (!np) {
np = of_find_node_by_name(rootnp, "01_00d");
@@ -166,98 +306,85 @@ void __init leon_init_timers(irq_handler_t counter_fn)
leon3_gptimer_irq = *(unsigned int *)pp->value;
} while (0);
- if (leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq) {
- LEON3_BYPASS_STORE_PA(
- &leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
- LEON3_BYPASS_STORE_PA(
- &leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
- (((1000000 / HZ) - 1)));
- LEON3_BYPASS_STORE_PA(
+ if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
+ goto bad;
+
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
+ (((1000000 / HZ) - 1)));
+ LEON3_BYPASS_STORE_PA(
&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
#ifdef CONFIG_SMP
- leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs;
- leon_percpu_timer_dev[0].irq = leon3_gptimer_irq + 1 +
- leon3_gptimer_idx;
-
- if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
- (1<<LEON3_GPTIMER_SEPIRQ))) {
- prom_printf("irq timer not configured with separate irqs\n");
- BUG();
- }
+ leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
- LEON3_BYPASS_STORE_PA(
- &leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, 0);
- LEON3_BYPASS_STORE_PA(
- &leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
- (((1000000/HZ) - 1)));
- LEON3_BYPASS_STORE_PA(
- &leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, 0);
-# endif
-
- /*
- * The IRQ controller may (if implemented) consist of multiple
- * IRQ controllers, each mapped on a 4Kb boundary.
- * Each CPU may be routed to different IRQCTRLs, however
- * we assume that all CPUs (in SMP system) is routed to the
- * same IRQ Controller, and for non-SMP only one IRQCTRL is
- * accessed anyway.
- * In AMP systems, Linux must run on CPU0 for the time being.
- */
- cpu = sparc_leon3_cpuid();
- icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[cpu/8]);
- icsel = (icsel >> ((7 - (cpu&0x7)) * 4)) & 0xf;
- leon3_irqctrl_regs += icsel;
- } else {
- goto bad;
+ if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
+ (1<<LEON3_GPTIMER_SEPIRQ))) {
+ printk(KERN_ERR "timer not configured with separate irqs\n");
+ BUG();
}
- irq = request_irq(leon3_gptimer_irq+leon3_gptimer_idx,
- counter_fn,
- (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
+ 0);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
+ (((1000000/HZ) - 1)));
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+ 0);
+#endif
- if (irq) {
- printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n",
- LEON_INTERRUPT_TIMER1);
+ /*
+ * The IRQ controller may (if implemented) consist of multiple
+ * IRQ controllers, each mapped on a 4Kb boundary.
+ * Each CPU may be routed to different IRQCTRLs; however, we
+ * assume that all CPUs (in an SMP system) are routed to the
+ * same IRQ controller, and for non-SMP only one IRQCTRL is
+ * accessed anyway.
+ * In AMP systems, Linux must run on CPU0 for the time being.
+ */
+ icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
+ icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
+ leon3_irqctrl_regs += icsel;
+
+ /* Mask all IRQs on boot-cpu IRQ controller */
+ LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);
+
+ /* Probe extended IRQ controller */
+ eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
+ >> 16) & 0xf;
+ if (eirq != 0)
+ leon_eirq_setup(eirq);
+
+ irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
+ err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
+ if (err) {
+ printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
prom_halt();
}
-# ifdef CONFIG_SMP
- {
- unsigned long flags;
- struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)];
-
- /* For SMP we use the level 14 ticker, however the bootup code
- * has copied the firmwares level 14 vector into boot cpu's
- * trap table, we must fix this now or we get squashed.
- */
- local_irq_save(flags);
-
- patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ LEON3_GPTIMER_EN |
+ LEON3_GPTIMER_RL |
+ LEON3_GPTIMER_LD |
+ LEON3_GPTIMER_IRQEN);
- /* Adjust so that we jump directly to smpleon_ticker */
- trap_table->inst_three += smpleon_ticker - real_irq_entry;
-
- local_flush_cache_all();
- local_irq_restore(flags);
+#ifdef CONFIG_SMP
+ /* Install per-cpu IRQ handler for the broadcast ticker */
+ irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
+ "per-cpu", 0);
+ err = request_irq(irq, leon_percpu_timer_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, "ticker",
+ NULL);
+ if (err) {
+ printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
+ prom_halt();
}
-# endif
-
- if (leon3_gptimer_regs) {
- LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
- LEON3_GPTIMER_EN |
- LEON3_GPTIMER_RL |
- LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
-#ifdef CONFIG_SMP
- LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
- LEON3_GPTIMER_EN |
- LEON3_GPTIMER_RL |
- LEON3_GPTIMER_LD |
- LEON3_GPTIMER_IRQEN);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+ LEON3_GPTIMER_EN |
+ LEON3_GPTIMER_RL |
+ LEON3_GPTIMER_LD |
+ LEON3_GPTIMER_IRQEN);
#endif
-
- }
return;
bad:
printk(KERN_ERR "No Timer/irqctrl found\n");
@@ -274,9 +401,6 @@ void leon_load_profile_irq(int cpu, unsigned int limit)
BUG();
}
-
-
-
void __init leon_trans_init(struct device_node *dp)
{
if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
@@ -330,22 +454,18 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
unsigned long mask, flags, *addr;
mask = get_irqmask(irq_nr);
- local_irq_save(flags);
- addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]);
- LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask)));
- local_irq_restore(flags);
+ spin_lock_irqsave(&leon_irq_lock, flags);
+ addr = (unsigned long *)LEON_IMASK(cpu);
+ LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
+ spin_unlock_irqrestore(&leon_irq_lock, flags);
}
#endif
void __init leon_init_IRQ(void)
{
- sparc_init_timers = leon_init_timers;
-
- BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
+ sparc_irq_config.init_timers = leon_init_timers;
+ sparc_irq_config.build_device_irq = _leon_build_device_irq;
BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
BTFIXUPCALL_NORM);
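
leon_build_device_irq() above maps a real LEON interrupt line to a virtual IRQ with the flow handler and ACK behaviour listed in its comment. A hedged sketch of how a level-sensitive source (for example PCI) might be wired up; the handler and IRQ numbers are illustrative:

/* illustrative sketch, not part of this patch */
static irqreturn_t example_level_isr(int irq, void *dev_id)
{
        /* the LEON IRQ controller is ACKed by leon_eoi_irq() afterwards */
        return IRQ_HANDLED;
}

static int example_attach_level_irq(unsigned int real_irq, void *dev)
{
        unsigned int irq;

        /* level IRQ: fasteoi flow, ACK the controller after the handler */
        irq = leon_build_device_irq(real_irq, handle_fasteoi_irq,
                                    "example-level", 1);
        if (!irq)
                return -ENODEV;

        return request_irq(irq, example_level_isr, 0, "example", dev);
}
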
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
new file mode 100644
index 000000000000..519ca923f59f
--- /dev/null
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -0,0 +1,82 @@
+/* leon_pmc.c: LEON Power-down cpu_idle() handler
+ *
+ * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ */
+
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/leon_amba.h>
+#include <asm/leon.h>
+
+/* List of systems that need fixup instructions around the power-down instruction */
+unsigned int pmc_leon_fixup_ids[] = {
+ AEROFLEX_UT699,
+ GAISLER_GR712RC,
+ LEON4_NEXTREME1,
+ 0
+};
+
+int pmc_leon_need_fixup(void)
+{
+ unsigned int systemid = amba_system_id >> 16;
+ unsigned int *id;
+
+ id = &pmc_leon_fixup_ids[0];
+ while (*id != 0) {
+ if (*id == systemid)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+/*
+ * CPU idle callback function for systems that need some extra handling
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle_fixup(void)
+{
+ /* Prepare an address in a non-cacheable region. APB is always
+ * non-cacheable. One instruction is executed after the sleep
+ * instruction; we make sure to read the bus and throw away the
+ * value by accessing a non-cacheable area, and we make sure the
+ * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
+ */
+ register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+ __asm__ __volatile__ (
+ "mov %%g0, %%asr19\n"
+ "lda [%0] %1, %%g0\n"
+ :
+ : "r"(address), "i"(ASI_LEON_BYPASS));
+}
+
+/*
+ * CPU idle callback function
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle(void)
+{
+ /* For systems without power-down, this will be a no-op */
+ __asm__ __volatile__ ("mov %g0, %asr19\n\t");
+}
+
+/* Install LEON Power Down function */
+static int __init leon_pmc_install(void)
+{
+ /* Assign power management IDLE handler */
+ if (pmc_leon_need_fixup())
+ pm_idle = pmc_leon_idle_fixup;
+ else
+ pm_idle = pmc_leon_idle;
+
+ printk(KERN_INFO "leon: power management initialized\n");
+
+ return 0;
+}
+
+/* This driver is not critical to the boot process; it does not matter
+ * if it is initialized late.
+ */
+late_initcall(leon_pmc_install);
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 16582d85368a..fe8fb44c609c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
@@ -29,6 +30,7 @@
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq_regs.h>
+#include <asm/traps.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -41,6 +43,8 @@
#include <asm/leon.h>
#include <asm/leon_amba.h>
+#include "kernel.h"
+
#ifdef CONFIG_SPARC_LEON
#include "irq.h"
@@ -48,9 +52,12 @@
extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned char boot_cpu_id;
extern cpumask_t smp_commenced_mask;
void __init leon_configure_cache_smp(void);
+static void leon_ipi_init(void);
+
+/* IRQ number of LEON IPIs */
+int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;
static inline unsigned long do_swap(volatile unsigned long *ptr,
unsigned long val)
@@ -92,8 +99,6 @@ void __cpuinit leon_callin(void)
local_flush_cache_all();
local_flush_tlb_all();
- cpu_probe();
-
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
: "memory" /* paranoid */);
@@ -102,11 +107,11 @@ void __cpuinit leon_callin(void)
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- while (!cpu_isset(cpuid, smp_commenced_mask))
+ while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
local_irq_enable();
- cpu_set(cpuid, cpu_online_map);
+ set_cpu_online(cpuid, true);
}
/*
@@ -177,13 +182,16 @@ void __init leon_boot_cpus(void)
int nrcpu = leon_smp_nrcpus();
int me = smp_processor_id();
+ /* Set up IPIs */
+ leon_ipi_init();
+
printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
(unsigned int)nrcpu, (unsigned int)NR_CPUS,
(unsigned int)&(leon3_irqctrl_regs->mpstatus));
leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
- leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me);
+ leon_enable_irq_cpu(leon_ipi_irq, me);
leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
@@ -218,6 +226,10 @@ int __cpuinit leon_boot_one_cpu(int i)
(unsigned int)&leon3_irqctrl_regs->mpstatus);
local_flush_cache_all();
+ /* Make sure all IRQs are off from the start for this new CPU */
+ LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
+
+ /* Wake one CPU */
LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
/* wheee... it's going... */
@@ -234,7 +246,7 @@ int __cpuinit leon_boot_one_cpu(int i)
} else {
leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
- leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i);
+ leon_enable_irq_cpu(leon_ipi_irq, i);
}
local_flush_cache_all();
@@ -260,24 +272,24 @@ void __init leon_smp_done(void)
local_flush_cache_all();
/* Free unneeded trap tables */
- if (!cpu_isset(1, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu1));
- init_page_count(virt_to_page(trapbase_cpu1));
- free_page((unsigned long)trapbase_cpu1);
+ if (!cpu_present(1)) {
+ ClearPageReserved(virt_to_page(&trapbase_cpu1));
+ init_page_count(virt_to_page(&trapbase_cpu1));
+ free_page((unsigned long)&trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
- if (!cpu_isset(2, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu2));
- init_page_count(virt_to_page(trapbase_cpu2));
- free_page((unsigned long)trapbase_cpu2);
+ if (!cpu_present(2)) {
+ ClearPageReserved(virt_to_page(&trapbase_cpu2));
+ init_page_count(virt_to_page(&trapbase_cpu2));
+ free_page((unsigned long)&trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
- if (!cpu_isset(3, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu3));
- init_page_count(virt_to_page(trapbase_cpu3));
- free_page((unsigned long)trapbase_cpu3);
+ if (!cpu_present(3)) {
+ ClearPageReserved(virt_to_page(&trapbase_cpu3));
+ init_page_count(virt_to_page(&trapbase_cpu3));
+ free_page((unsigned long)&trapbase_cpu3);
totalram_pages++;
num_physpages++;
}
@@ -290,6 +302,99 @@ void leon_irq_rotate(int cpu)
{
}
+struct leon_ipi_work {
+ int single;
+ int msk;
+ int resched;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);
+
+/* Initialize IPIs on the LEON. In order to save IRQ resources, only one IRQ
+ * is used for all three types of IPIs.
+ */
+static void __init leon_ipi_init(void)
+{
+ int cpu, len;
+ struct leon_ipi_work *work;
+ struct property *pp;
+ struct device_node *rootnp;
+ struct tt_entry *trap_table;
+ unsigned long flags;
+
+ /* Find IPI IRQ or stick with default value */
+ rootnp = of_find_node_by_path("/ambapp0");
+ if (rootnp) {
+ pp = of_find_property(rootnp, "ipi_num", &len);
+ if (pp && (*(int *)pp->value))
+ leon_ipi_irq = *(int *)pp->value;
+ }
+ printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);
+
+ /* Adjust so that we jump directly to smpleon_ipi */
+ local_irq_save(flags);
+ trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
+ trap_table->inst_three += smpleon_ipi - real_irq_entry;
+ local_flush_cache_all();
+ local_irq_restore(flags);
+
+ for_each_possible_cpu(cpu) {
+ work = &per_cpu(leon_ipi_work, cpu);
+ work->single = work->msk = work->resched = 0;
+ }
+}
+
+static void leon_ipi_single(int cpu)
+{
+ struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+ /* Mark work */
+ work->single = 1;
+
+ /* Generate IRQ on the CPU */
+ set_cpu_int(cpu, leon_ipi_irq);
+}
+
+static void leon_ipi_mask_one(int cpu)
+{
+ struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+ /* Mark work */
+ work->msk = 1;
+
+ /* Generate IRQ on the CPU */
+ set_cpu_int(cpu, leon_ipi_irq);
+}
+
+static void leon_ipi_resched(int cpu)
+{
+ struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+ /* Mark work */
+ work->resched = 1;
+
+ /* Generate IRQ on the CPU (any IRQ will cause resched) */
+ set_cpu_int(cpu, leon_ipi_irq);
+}
+
+void leonsmp_ipi_interrupt(void)
+{
+ struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
+
+ if (work->single) {
+ work->single = 0;
+ smp_call_function_single_interrupt();
+ }
+ if (work->msk) {
+ work->msk = 0;
+ smp_call_function_interrupt();
+ }
+ if (work->resched) {
+ work->resched = 0;
+ smp_resched_interrupt();
+ }
+}
+
static struct smp_funcall {
smpfunc_t func;
unsigned long arg1;
@@ -335,10 +440,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
register int i;
- cpu_clear(smp_processor_id(), mask);
- cpus_and(mask, cpu_online_map, mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+ cpumask_and(&mask, cpu_online_mask, &mask);
for (i = 0; i <= high; i++) {
- if (cpu_isset(i, mask)) {
+ if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
@@ -352,7 +457,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_in[i])
@@ -361,7 +466,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_out[i])
@@ -384,27 +489,23 @@ void leon_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
-void leon_percpu_timer_interrupt(struct pt_regs *regs)
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused)
{
- struct pt_regs *old_regs;
int cpu = smp_processor_id();
- old_regs = set_irq_regs(regs);
-
leon_clear_profile_irq(cpu);
profile_tick(CPU_PROFILING);
if (!--prof_counter(cpu)) {
- int user = user_mode(regs);
+ int user = user_mode(get_irq_regs());
- irq_enter();
update_process_times(user);
- irq_exit();
prof_counter(cpu) = prof_multiplier(cpu);
}
- set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
}
static void __init smp_setup_percpu_timer(void)
@@ -437,15 +538,6 @@ void __init leon_blackbox_current(unsigned *addr)
}
-/*
- * CPU idle callback function
- * See .../arch/sparc/kernel/process.c
- */
-void pmc_leon_idle(void)
-{
- __asm__ volatile ("mov %g0, %asr19");
-}
-
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
@@ -456,13 +548,9 @@ void __init leon_init_smp(void)
BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
BTFIXUPCALL_NORM);
-
-#ifndef PMC_NO_IDLE
- /* Assign power management IDLE handler */
- pm_idle = pmc_leon_idle;
- printk(KERN_INFO "leon: power management initialized\n");
-#endif
-
+ BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM);
}
#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 6addb914fcc8..42f28c7420e1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
return hp;
}
-static void mdesc_memblock_free(struct mdesc_handle *hp)
+static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
unsigned int alloc_size;
unsigned long start;
@@ -768,7 +768,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl
cpuid, NR_CPUS);
continue;
}
- if (!cpu_isset(cpuid, *mask))
+ if (!cpumask_test_cpu(cpuid, mask))
continue;
#endif
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 2d055a1e9cc2..a312af40ea84 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -13,6 +13,7 @@
#include <asm/leon_amba.h>
#include "of_device_common.h"
+#include "irq.h"
/*
* PCI bus specific translator
@@ -355,7 +356,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (intr) {
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = intr[i].pri;
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
@@ -363,64 +365,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (irq) {
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = irq[i];
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}
}
- if (sparc_cpu_model == sun4d) {
- static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
- };
- struct device_node *io_unit, *sbi = dp->parent;
- const struct linux_prom_registers *regs;
- int board, slot;
-
- while (sbi) {
- if (!strcmp(sbi->name, "sbi"))
- break;
-
- sbi = sbi->parent;
- }
- if (!sbi)
- goto build_resources;
-
- regs = of_get_property(dp, "reg", NULL);
- if (!regs)
- goto build_resources;
-
- slot = regs->which_io;
-
- /* If SBI's parent is not io-unit or the io-unit lacks
- * a "board#" property, something is very wrong.
- */
- if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
- printk("%s: Error, parent is not io-unit.\n",
- sbi->full_name);
- goto build_resources;
- }
- io_unit = sbi->parent;
- board = of_getintprop_default(io_unit, "board#", -1);
- if (board == -1) {
- printk("%s: Error, lacks board# property.\n",
- io_unit->full_name);
- goto build_resources;
- }
-
- for (i = 0; i < op->archdata.num_irqs; i++) {
- int this_irq = op->archdata.irqs[i];
- int sbusl = pil_to_sbus[this_irq];
-
- if (sbusl)
- this_irq = (((board + 1) << 5) +
- (sbusl << 2) +
- slot);
-
- op->archdata.irqs[i] = this_irq;
- }
- }
-build_resources:
build_device_resources(op, parent);
op->dev.parent = parent;
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 63cd4e5d47c2..3bb2eace58cf 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -459,7 +459,7 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp,
*
* Handle this by deciding that, if we didn't get a
* match in the parent's 'interrupt-map', and the
- * parent is an IRQ translater, then use the parent as
+ * parent is an IRQ translator, then use the parent as
* our IRQ controller.
*/
if (pp->irq_trans)
@@ -622,8 +622,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
out:
nid = of_node_to_nid(dp);
if (nid != -1) {
- cpumask_t numa_mask = *cpumask_of_node(nid);
+ cpumask_t numa_mask;
+ cpumask_copy(&numa_mask, cpumask_of_node(nid));
irq_set_affinity(irq, &numa_mask);
}
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index 49ddff56cb04..cb15bbf8a201 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -22,6 +22,33 @@ unsigned int irq_of_parse_and_map(struct device_node *node, int index)
}
EXPORT_SYMBOL(irq_of_parse_and_map);
+int of_address_to_resource(struct device_node *node, int index,
+ struct resource *r)
+{
+ struct platform_device *op = of_find_device_by_node(node);
+
+ if (!op || index >= op->num_resources)
+ return -EINVAL;
+
+ memcpy(r, &op->archdata.resource[index], sizeof(*r));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_address_to_resource);
+
+void __iomem *of_iomap(struct device_node *node, int index)
+{
+ struct platform_device *op = of_find_device_by_node(node);
+ struct resource *r;
+
+ if (!op || index >= op->num_resources)
+ return NULL;
+
+ r = &op->archdata.resource[index];
+
+ return of_ioremap(r, 0, resource_size(r), (char *) r->name);
+}
+EXPORT_SYMBOL(of_iomap);
+
/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
* BUS and propagate to all child platform_device objects.
*/
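
of_address_to_resource() and of_iomap() are added here so sparc drivers can resolve and map device registers directly from a device_node, backed by the platform_device archdata resources. A short hedged usage sketch, with the node pointer assumed to come from the caller:

/* illustrative sketch, not part of this patch */
static void __iomem *example_map_regs(struct device_node *np)
{
        struct resource res;

        if (of_address_to_resource(np, 0, &res))
                return NULL;

        pr_info("example: regs at %pR\n", &res);	/* optional diagnostic */

        /* maps op->archdata.resource[0] via of_ioremap() */
        return of_iomap(np, 0);
}
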
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 4137579d9adc..713dc91020a6 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+ (void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
@@ -1001,22 +1002,22 @@ EXPORT_SYMBOL(pci_domain_nr);
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned int virt_irq;
+ unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
- return pbm->setup_msi_irq(&virt_irq, pdev, desc);
+ return pbm->setup_msi_irq(&irq, pdev, desc);
}
-void arch_teardown_msi_irq(unsigned int virt_irq)
+void arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_desc *entry = get_irq_msi(virt_irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
- pbm->teardown_msi_irq(virt_irq, pdev);
+ pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6c7a33af3ba6..6e3874b64488 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -295,14 +295,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
- ret = pci_sun4v_config_put(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size, value);
+ /* We don't check for hypervisor errors here, but perhaps
+ * we should and influence our return value depending upon
+ * what kind of error is thrown.
+ */
+ pci_sun4v_config_put(devhandle,
+ HV_PCI_DEVICE_BUILD(bus, device, func),
+ where, size, value);
}
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index efb896d68754..d29a32fcc5e4 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
- unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
- msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
@@ -277,7 +275,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
- unsigned int virt_irq;
+ unsigned int irq;
int fixup;
u64 val;
@@ -293,14 +291,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
- virt_irq = build_irq(fixup, iclr_reg, imap_reg);
- if (!virt_irq)
+ irq = build_irq(fixup, iclr_reg, imap_reg);
+ if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
@@ -455,8 +453,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm,
return 0;
}
-static int __devinit fire_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit fire_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
@@ -499,7 +496,7 @@ out_err:
return err;
}
-static struct of_device_id __initdata fire_match[] = {
+static const struct of_device_id fire_match[] = {
{
.name = "pci",
.compatible = "pciex108e,80f0",
@@ -507,7 +504,7 @@ static struct of_device_id __initdata fire_match[] = {
{},
};
-static struct of_platform_driver fire_driver = {
+static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -518,7 +515,7 @@ static struct of_platform_driver fire_driver = {
static int __init fire_init(void)
{
- return of_register_platform_driver(&fire_driver);
+ return platform_driver_register(&fire_driver);
}
subsys_initcall(fire_init);
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index e20ed5f06e9c..6beb60df31d0 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -131,9 +131,9 @@ struct pci_pbm_info {
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
- int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
+ int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
- void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+ void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index b210416ace7b..580651af73f2 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -30,13 +30,10 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
- struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
- virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
- desc = irq_desc + virt_irq;
-
- desc->handle_irq(virt_irq, desc);
+ irq = pbm->msi_irq_table[msi - pbm->msi_first];
+ generic_handle_irq(irq);
}
if (unlikely(err < 0))
@@ -121,7 +118,7 @@ static struct irq_chip msi_irq = {
/* XXX affinity XXX */
};
-static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
+static int sparc64_setup_msi_irq(unsigned int *irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
@@ -131,17 +128,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
int msi, err;
u32 msiqid;
- *virt_irq_p = virt_irq_alloc(0, 0);
+ *irq_p = irq_alloc(0, 0);
err = -ENOMEM;
- if (!*virt_irq_p)
+ if (!*irq_p)
goto out_err;
- set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
- handle_simple_irq, "MSI");
+ irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
+ "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
- goto out_virt_irq_free;
+ goto out_irq_free;
msi = err;
@@ -152,7 +149,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
if (err)
goto out_msi_free;
- pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
+ pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
@@ -163,24 +160,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
}
msg.data = msi;
- set_irq_msi(*virt_irq_p, entry);
- write_msi_msg(*virt_irq_p, &msg);
+ irq_set_msi_desc(*irq_p, entry);
+ write_msi_msg(*irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
-out_virt_irq_free:
- set_irq_chip(*virt_irq_p, NULL);
- virt_irq_free(*virt_irq_p);
- *virt_irq_p = 0;
+out_irq_free:
+ irq_set_chip(*irq_p, NULL);
+ irq_free(*irq_p);
+ *irq_p = 0;
out_err:
return err;
}
-static void sparc64_teardown_msi_irq(unsigned int virt_irq,
+static void sparc64_teardown_msi_irq(unsigned int irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
@@ -189,12 +186,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
- if (pbm->msi_irq_table[i] == virt_irq)
+ if (pbm->msi_irq_table[i] == irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, virt_irq);
+ pbm->name, irq);
return;
}
@@ -205,14 +202,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
- pbm->name, msi_num, virt_irq, err);
+ pbm->name, msi_num, irq, err);
return;
}
free_msi(pbm, msi_num);
- set_irq_chip(virt_irq, NULL);
- virt_irq_free(virt_irq);
+ irq_set_chip(irq, NULL);
+ irq_free(irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
@@ -287,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
nid = pbm->numa_node;
if (nid != -1) {
- cpumask_t numa_mask = *cpumask_of_node(nid);
+ cpumask_t numa_mask;
+ cpumask_copy(&numa_mask, cpumask_of_node(nid));
irq_set_affinity(irq, &numa_mask);
}
err = request_irq(irq, sparc64_msiq_interrupt, 0,
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index 22eab7cf3b11..86ae08d9b6ee 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -503,8 +503,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid)
#define PSYCHO_CONFIGSPACE 0x001000000UL
-static int __devinit psycho_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit psycho_probe(struct platform_device *op)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
@@ -593,7 +592,7 @@ out_err:
return err;
}
-static struct of_device_id __initdata psycho_match[] = {
+static const struct of_device_id psycho_match[] = {
{
.name = "pci",
.compatible = "pci108e,8000",
@@ -601,7 +600,7 @@ static struct of_device_id __initdata psycho_match[] = {
{},
};
-static struct of_platform_driver psycho_driver = {
+static struct platform_driver psycho_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -612,7 +611,7 @@ static struct of_platform_driver psycho_driver = {
static int __init psycho_init(void)
{
- return of_register_platform_driver(&psycho_driver);
+ return platform_driver_register(&psycho_driver);
}
subsys_initcall(psycho_init);
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 5c3f5ec4cabc..d1840dbdaa2f 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,9 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
sabre_scan_bus(pbm, &op->dev);
}
-static int __devinit sabre_probe(struct platform_device *op,
- const struct of_device_id *match)
+static const struct of_device_id sabre_match[];
+static int __devinit sabre_probe(struct platform_device *op)
{
+ const struct of_device_id *match;
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
@@ -464,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op,
const u32 *vdma;
u64 clear_irq;
- hummingbird_p = (match->data != NULL);
+ match = of_match_device(sabre_match, &op->dev);
+ hummingbird_p = match && (match->data != NULL);
if (!hummingbird_p) {
struct device_node *cpu_dp;
@@ -582,7 +584,7 @@ out_err:
return err;
}
-static struct of_device_id __initdata sabre_match[] = {
+static const struct of_device_id sabre_match[] = {
{
.name = "pci",
.compatible = "pci108e,a001",
@@ -595,7 +597,7 @@ static struct of_device_id __initdata sabre_match[] = {
{},
};
-static struct of_platform_driver sabre_driver = {
+static struct platform_driver sabre_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -606,7 +608,7 @@ static struct of_platform_driver sabre_driver = {
static int __init sabre_init(void)
{
- return of_register_platform_driver(&sabre_driver);
+ return platform_driver_register(&sabre_driver);
}
subsys_initcall(sabre_init);
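The sabre conversion above (and the schizo one below) illustrates the new probe convention: a platform_driver probe routine no longer receives a match-table entry, so the driver forward-declares its table and queries it with of_match_device(). A condensed sketch with illustrative names:

static const struct of_device_id example_match[];	/* table defined below */

static int __devinit example_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	match = of_match_device(example_match, &op->dev);
	if (!match)
		return -EINVAL;

	/* match->data carries the per-variant flag formerly passed to probe */
	return 0;
}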
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 445a47a2fb3d..283fbc329a43 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
- int is_pbm_a, err;
+ int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
@@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
*/
regs = of_get_property(dp, "reg", NULL);
- is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
@@ -1460,10 +1458,15 @@ out_err:
return err;
}
-static int __devinit schizo_probe(struct platform_device *op,
- const struct of_device_id *match)
+static const struct of_device_id schizo_match[];
+static int __devinit schizo_probe(struct platform_device *op)
{
- return __schizo_init(op, (unsigned long) match->data);
+ const struct of_device_id *match;
+
+ match = of_match_device(schizo_match, &op->dev);
+ if (!match)
+ return -EINVAL;
+ return __schizo_init(op, (unsigned long)match->data);
}
/* The ordering of this table is very important. Some Tomatillo
@@ -1471,7 +1474,7 @@ static int __devinit schizo_probe(struct platform_device *op,
* and pci108e,8001. So list the chips in reverse chronological
* order.
*/
-static struct of_device_id __initdata schizo_match[] = {
+static const struct of_device_id schizo_match[] = {
{
.name = "pci",
.compatible = "pci108e,a801",
@@ -1490,7 +1493,7 @@ static struct of_device_id __initdata schizo_match[] = {
{},
};
-static struct of_platform_driver schizo_driver = {
+static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1501,7 +1504,7 @@ static struct of_platform_driver schizo_driver = {
static int __init schizo_init(void)
{
- return of_register_platform_driver(&schizo_driver);
+ return platform_driver_register(&schizo_driver);
}
subsys_initcall(schizo_init);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 743344aa6d8a..b01a06e9ae4e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
- unsigned long num_tsb_entries, sz, tsbsize;
+ unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
@@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
- tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
@@ -844,9 +843,9 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
- unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
+ unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
- if (!virt_irq)
+ if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
@@ -854,7 +853,7 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
@@ -918,8 +917,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
return 0;
}
-static int __devinit pci_sun4v_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit pci_sun4v_probe(struct platform_device *op)
{
const struct linux_prom64_registers *regs;
static int hvapi_negotiated = 0;
@@ -1000,7 +998,7 @@ out_err:
return err;
}
-static struct of_device_id __initdata pci_sun4v_match[] = {
+static const struct of_device_id pci_sun4v_match[] = {
{
.name = "pci",
.compatible = "SUNW,sun4v-pci",
@@ -1008,7 +1006,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = {
{},
};
-static struct of_platform_driver pci_sun4v_driver = {
+static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1019,7 +1017,7 @@ static struct of_platform_driver pci_sun4v_driver = {
static int __init pci_sun4v_init(void)
{
- return of_register_platform_driver(&pci_sun4v_driver);
+ return platform_driver_register(&pci_sun4v_driver);
}
subsys_initcall(pci_sun4v_init);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index aeaa09a3c655..948601a066ff 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -164,6 +164,9 @@ void __iomem *pcic_regs;
volatile int pcic_speculative;
volatile int pcic_trapped;
+/* forward */
+unsigned int pcic_build_device_irq(struct platform_device *op,
+ unsigned int real_irq);
#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
@@ -523,6 +526,7 @@ static void
pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
{
struct pcic_ca2irq *p;
+ unsigned int real_irq;
int i, ivec;
char namebuf[64];
@@ -551,26 +555,25 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
i = p->pin;
if (i >= 0 && i < 4) {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
- dev->irq = ivec >> (i << 2) & 0xF;
+ real_irq = ivec >> (i << 2) & 0xF;
} else if (i >= 4 && i < 8) {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
- dev->irq = ivec >> ((i-4) << 2) & 0xF;
+ real_irq = ivec >> ((i-4) << 2) & 0xF;
} else { /* Corrupted map */
printk("PCIC: BAD PIN %d\n", i); for (;;) {}
}
/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
- /*
- * dev->irq=0 means PROM did not bother to program the upper
+	/* real_irq == 0 means PROM did not bother to program the upper
* half of PCIC. This happens on JS-E with PROM 3.11, for instance.
*/
- if (dev->irq == 0 || p->force) {
+ if (real_irq == 0 || p->force) {
if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
}
printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
p->irq, p->pin, dev->bus->number, dev->devfn);
- dev->irq = p->irq;
+ real_irq = p->irq;
i = p->pin;
if (i >= 4) {
@@ -584,7 +587,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
ivec |= p->irq << (i << 2);
writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
}
- }
+ }
+ dev->irq = pcic_build_device_irq(NULL, real_irq);
}
/*
@@ -700,10 +704,8 @@ static void pcic_clear_clock_irq(void)
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
- write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
@@ -731,6 +733,7 @@ void __init pci_time_init(void)
struct linux_pcic *pcic = &pcic0;
unsigned long v;
int timer_irq, irq;
+ int err;
do_arch_gettimeoffset = pci_gettimeoffset;
@@ -742,9 +745,10 @@ void __init pci_time_init(void)
timer_irq = PCI_COUNTER_IRQ_SYS(v);
writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
pcic->pcic_regs+PCI_COUNTER_IRQ);
- irq = request_irq(timer_irq, pcic_timer_handler,
- (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
- if (irq) {
+ irq = pcic_build_device_irq(NULL, timer_irq);
+ err = request_irq(irq, pcic_timer_handler,
+ IRQF_TIMER, "timer", NULL);
+ if (err) {
prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
prom_halt();
}
@@ -805,50 +809,73 @@ static inline unsigned long get_irqmask(int irq_nr)
return 1 << irq_nr;
}
-static void pcic_disable_irq(unsigned int irq_nr)
+static void pcic_mask_irq(struct irq_data *data)
{
unsigned long mask, flags;
- mask = get_irqmask(irq_nr);
+ mask = (unsigned long)data->chip_data;
local_irq_save(flags);
writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
local_irq_restore(flags);
}
-static void pcic_enable_irq(unsigned int irq_nr)
+static void pcic_unmask_irq(struct irq_data *data)
{
unsigned long mask, flags;
- mask = get_irqmask(irq_nr);
+ mask = (unsigned long)data->chip_data;
local_irq_save(flags);
writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
local_irq_restore(flags);
}
-static void pcic_load_profile_irq(int cpu, unsigned int limit)
+static unsigned int pcic_startup_irq(struct irq_data *data)
{
- printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
+ irq_link(data->irq);
+ pcic_unmask_irq(data);
+ return 0;
}
-/* We assume the caller has disabled local interrupts when these are called,
- * or else very bizarre behavior will result.
- */
-static void pcic_disable_pil_irq(unsigned int pil)
+static struct irq_chip pcic_irq = {
+ .name = "pcic",
+ .irq_startup = pcic_startup_irq,
+ .irq_mask = pcic_mask_irq,
+ .irq_unmask = pcic_unmask_irq,
+};
+
+unsigned int pcic_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
{
- writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
+ unsigned int irq;
+ unsigned long mask;
+
+ irq = 0;
+ mask = get_irqmask(real_irq);
+ if (mask == 0)
+ goto out;
+
+ irq = irq_alloc(real_irq, real_irq);
+ if (irq == 0)
+ goto out;
+
+ irq_set_chip_and_handler_name(irq, &pcic_irq,
+ handle_level_irq, "PCIC");
+ irq_set_chip_data(irq, (void *)mask);
+
+out:
+ return irq;
}
-static void pcic_enable_pil_irq(unsigned int pil)
+
+static void pcic_load_profile_irq(int cpu, unsigned int limit)
{
- writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
+ printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
void __init sun4m_pci_init_IRQ(void)
{
- BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM);
+ sparc_irq_config.build_device_irq = pcic_build_device_irq;
+
BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
}
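Under the reworked PCIC code above, a driver first translates a hardware interrupt line into a virtual IRQ and then requests it, as pci_time_init() now does. A minimal sketch (handler and name are illustrative):

static int example_request_pcic_irq(unsigned int real_irq, irq_handler_t handler)
{
	unsigned int irq = pcic_build_device_irq(NULL, real_irq);

	if (!irq)
		return -EINVAL;		/* unknown line or allocation failure */
	return request_irq(irq, handler, 0, "example", NULL);
}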
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7c2ced612b8f..8ac23e660080 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val)
unsigned long ret;
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
- if (val != HV_EOK)
+ if (ret != HV_EOK)
write_pcr(val);
}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 760578687e7c..2cb0e1c001e2 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -26,6 +26,7 @@
#include <asm/nmi.h>
#include <asm/pcr.h>
+#include "kernel.h"
#include "kstack.h"
/* Sparc64 chips have two performance counters, 32-bits each, with
@@ -1027,7 +1028,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
/*
* If group events scheduling transaction was started,
- * skip the schedulability test here, it will be peformed
+ * skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 94536a85f161..6a585d393580 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -51,8 +51,7 @@ static void pmc_swift_idle(void)
#endif
}
-static int __devinit pmc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit pmc_probe(struct platform_device *op)
{
regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), PMC_OBPNAME);
@@ -70,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op,
return 0;
}
-static struct of_device_id __initdata pmc_match[] = {
+static struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
@@ -78,7 +77,7 @@ static struct of_device_id __initdata pmc_match[] = {
};
MODULE_DEVICE_TABLE(of, pmc_match);
-static struct of_platform_driver pmc_driver = {
+static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
.owner = THIS_MODULE,
@@ -89,7 +88,7 @@ static struct of_platform_driver pmc_driver = {
static int __init pmc_init(void)
{
- return of_register_platform_driver(&pmc_driver);
+ return platform_driver_register(&pmc_driver);
}
/* This driver is not critical to the boot process
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 2c59f4d387dd..cb4c0f57c024 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node *
return 1;
}
-static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit power_probe(struct platform_device *op)
{
struct resource *res = &op->resource[0];
unsigned int irq = op->archdata.irqs[0];
@@ -52,14 +52,14 @@ static int __devinit power_probe(struct platform_device *op, const struct of_dev
return 0;
}
-static struct of_device_id __initdata power_match[] = {
+static const struct of_device_id power_match[] = {
{
.name = "power",
},
{},
};
-static struct of_platform_driver power_driver = {
+static struct platform_driver power_driver = {
.probe = power_probe,
.driver = {
.name = "power",
@@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = {
static int __init power_init(void)
{
- return of_register_platform_driver(&power_driver);
+ return platform_driver_register(&power_driver);
}
device_initcall(power_init);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 17529298c50a..c8cc461ff75f 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -128,8 +128,16 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while(1) {
- while (!need_resched())
- cpu_relax();
+#ifdef CONFIG_SPARC_LEON
+ if (pm_idle) {
+ while (!need_resched())
+ (*pm_idle)();
+ } else
+#endif
+ {
+ while (!need_resched())
+ cpu_relax();
+ }
preempt_enable_no_resched();
schedule();
preempt_disable();
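cpu_idle() above now calls through pm_idle when it is set on LEON; a hypothetical power-management driver would install its callback roughly as below. The function names and the callback body are placeholders, not the actual leon_pmc implementation:

static void example_leon_idle(void)
{
	/* placeholder: the real callback would drop the CPU into a
	 * low-power state until the next interrupt arrives
	 */
	cpu_relax();
}

static int __init example_idle_setup(void)
{
	pm_idle = example_leon_idle;	/* consumed by cpu_idle() */
	return 0;
}
device_initcall(example_idle_setup);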
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 05fb25330583..5ce3d15a99b0 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -326,7 +326,6 @@ void __init of_console_init(void)
of_console_options = NULL;
}
- prom_printf(msg, of_console_path);
printk(msg, of_console_path);
}
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index ce651147fabc..570b98f6e897 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
- int virt_irq;
+ int irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
- virt_irq = build_irq(inofixup, iclr, imap);
+ irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
@@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp,
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
- return virt_irq;
+ return irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
@@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
- int virt_irq;
+ int irq;
int is_tomatillo;
ino &= 0x3f;
@@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp,
ign_fixup = (1 << 6);
}
- virt_irq = build_irq(ign_fixup, iclr, imap);
+ irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
- return virt_irq;
+ return irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ccc812bc09e..96ee50a80661 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1086,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
+#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context)) {
unsigned long tstate = regs->tstate;
int result = AUDITSC_SUCCESS;
@@ -1095,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
-
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 648f2161b851..3249d3f3234d 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -82,7 +82,7 @@ static void prom_sync_me(void)
"nop\n\t" : : "r" (&trapbase));
prom_printf("PROM SYNC COMMAND...\n");
- show_free_areas();
+ show_free_areas(0);
if(current->pid != 0) {
local_irq_enable();
sys_sync();
@@ -103,16 +103,20 @@ static unsigned int boot_flags __initdata = 0;
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size __initdata = 0;
+/* which CPU booted us (0xff = not set) */
+unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
+unsigned char boot_cpu_id4; /* boot_cpu_id << 2 */
+
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
prom_write(s, n);
}
-static struct console prom_debug_console = {
- .name = "debug",
+static struct console prom_early_console = {
+ .name = "earlyprom",
.write = prom_console_write,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
@@ -133,8 +137,7 @@ static void __init process_switch(char c)
prom_halt();
break;
case 'p':
- /* Use PROM debug console. */
- register_console(&prom_debug_console);
+ /* Just ignore, this behavior is now the default. */
break;
default:
printk("Unknown boot switch (-%c)\n", c);
@@ -184,7 +187,6 @@ static void __init boot_flags_init(char *commands)
*/
extern void sun4c_probe_vac(void);
-extern char cputypval;
extern unsigned short root_flags;
extern unsigned short root_dev;
@@ -216,23 +218,27 @@ void __init setup_arch(char **cmdline_p)
strcpy(boot_command_line, *cmdline_p);
parse_early_param();
+ boot_flags_init(*cmdline_p);
+
+ register_console(&prom_early_console);
+
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
- if (!strcmp(&cputypval,"sun4 "))
+ if (!strcmp(&cputypval[0], "sun4 "))
sparc_cpu_model = sun4;
- if (!strcmp(&cputypval,"sun4c"))
+ if (!strcmp(&cputypval[0], "sun4c"))
sparc_cpu_model = sun4c;
- if (!strcmp(&cputypval,"sun4m"))
+ if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
- if (!strcmp(&cputypval,"sun4s"))
+ if (!strcmp(&cputypval[0], "sun4s"))
sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
- if (!strcmp(&cputypval,"sun4d"))
+ if (!strcmp(&cputypval[0], "sun4d"))
sparc_cpu_model = sun4d;
- if (!strcmp(&cputypval,"sun4e"))
+ if (!strcmp(&cputypval[0], "sun4e"))
sparc_cpu_model = sun4e;
- if (!strcmp(&cputypval,"sun4u"))
+ if (!strcmp(&cputypval[0], "sun4u"))
sparc_cpu_model = sun4u;
- if (!strncmp(&cputypval, "leon" , 4))
+ if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
printk("ARCH: ");
@@ -266,7 +272,6 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
- boot_flags_init(*cmdline_p);
idprom_init();
if (ARCH_SUN4C)
@@ -312,75 +317,6 @@ void __init setup_arch(char **cmdline_p)
smp_setup_cpu_possible_map();
}
-static int ncpus_probed;
-
-static int show_cpuinfo(struct seq_file *m, void *__unused)
-{
- seq_printf(m,
- "cpu\t\t: %s\n"
- "fpu\t\t: %s\n"
- "promlib\t\t: Version %d Revision %d\n"
- "prom\t\t: %d.%d\n"
- "type\t\t: %s\n"
- "ncpus probed\t: %d\n"
- "ncpus active\t: %d\n"
-#ifndef CONFIG_SMP
- "CPU0Bogo\t: %lu.%02lu\n"
- "CPU0ClkTck\t: %ld\n"
-#endif
- ,
- sparc_cpu_type,
- sparc_fpu_type ,
- romvec->pv_romvers,
- prom_rev,
- romvec->pv_printrev >> 16,
- romvec->pv_printrev & 0xffff,
- &cputypval,
- ncpus_probed,
- num_online_cpus()
-#ifndef CONFIG_SMP
- , cpu_data(0).udelay_val/(500000/HZ),
- (cpu_data(0).udelay_val/(5000/HZ)) % 100,
- cpu_data(0).clock_tick
-#endif
- );
-
-#ifdef CONFIG_SMP
- smp_bogo(m);
-#endif
- mmu_info(m);
-#ifdef CONFIG_SMP
- smp_info(m);
-#endif
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- /* The pointer we are returning is arbitrary,
- * it just has to be non-NULL and not IS_ERR
- * in the success case.
- */
- return *pos == 0 ? &c_start : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- .start =c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
-
extern int stop_a_enabled;
void sun_do_break(void)
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 29bafe051bb1..f3b6850cc8db 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -339,84 +339,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
}
-/* BUFFER is PAGE_SIZE bytes long. */
-
-extern void smp_info(struct seq_file *);
-extern void smp_bogo(struct seq_file *);
-extern void mmu_info(struct seq_file *);
-
-unsigned int dcache_parity_tl1_occurred;
-unsigned int icache_parity_tl1_occurred;
-
-int ncpus_probed;
-
-static int show_cpuinfo(struct seq_file *m, void *__unused)
-{
- seq_printf(m,
- "cpu\t\t: %s\n"
- "fpu\t\t: %s\n"
- "pmu\t\t: %s\n"
- "prom\t\t: %s\n"
- "type\t\t: %s\n"
- "ncpus probed\t: %d\n"
- "ncpus active\t: %d\n"
- "D$ parity tl1\t: %u\n"
- "I$ parity tl1\t: %u\n"
-#ifndef CONFIG_SMP
- "Cpu0ClkTck\t: %016lx\n"
-#endif
- ,
- sparc_cpu_type,
- sparc_fpu_type,
- sparc_pmu_type,
- prom_version,
- ((tlb_type == hypervisor) ?
- "sun4v" :
- "sun4u"),
- ncpus_probed,
- num_online_cpus(),
- dcache_parity_tl1_occurred,
- icache_parity_tl1_occurred
-#ifndef CONFIG_SMP
- , cpu_data(0).clock_tick
-#endif
- );
-#ifdef CONFIG_SMP
- smp_bogo(m);
-#endif
- mmu_info(m);
-#ifdef CONFIG_SMP
- smp_info(m);
-#endif
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- /* The pointer we are returning is arbitrary,
- * it just has to be non-NULL and not IS_ERR
- * in the success case.
- */
- return *pos == 0 ? &c_start : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- .start =c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
-
extern int stop_a_enabled;
void sun_do_break(void)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..d5b3958be0b4 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -37,8 +37,6 @@
#include "irq.h"
volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
-unsigned char boot_cpu_id = 0;
-unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
@@ -53,6 +51,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
void __cpuinit smp_store_cpu_info(int id)
{
int cpu_node;
+ int mid;
cpu_data(id).udelay_val = loops_per_jiffy;
@@ -60,10 +59,13 @@ void __cpuinit smp_store_cpu_info(int id)
cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
- cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+ mid = cpu_get_hwmid(cpu_node);
- if (cpu_data(id).mid < 0)
- panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+ if (mid < 0) {
+ printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+ mid = 0;
+ }
+ cpu_data(id).mid = mid;
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -125,13 +127,58 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
void smp_send_reschedule(int cpu)
{
- /* See sparc64 */
+ /*
+ * CPU model dependent way of implementing IPI generation targeting
+ * a single CPU. The trap handler needs only to do trap entry/return
+ * to call schedule.
+ */
+ BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
void smp_send_stop(void)
{
}
+void arch_send_call_function_single_ipi(int cpu)
+{
+ /* trigger one IPI single call on one CPU */
+ BTFIXUP_CALL(smp_ipi_single)(cpu);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ int cpu;
+
+ /* trigger IPI mask call on each CPU */
+ for_each_cpu(cpu, mask)
+ BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+}
+
+void smp_resched_interrupt(void)
+{
+ irq_enter();
+ scheduler_ipi();
+ local_cpu_data().irq_resched_count++;
+ irq_exit();
+ /* re-schedule routine called by interrupt return code. */
+}
+
+void smp_call_function_single_interrupt(void)
+{
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ local_cpu_data().irq_call_count++;
+ irq_exit();
+}
+
+void smp_call_function_interrupt(void)
+{
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ local_cpu_data().irq_call_count++;
+ irq_exit();
+}
+
void smp_flush_cache_all(void)
{
xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
@@ -147,9 +194,10 @@ void smp_flush_tlb_all(void)
void smp_flush_cache_mm(struct mm_struct *mm)
{
if(mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
local_flush_cache_mm(mm);
}
@@ -158,9 +206,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
void smp_flush_tlb_mm(struct mm_struct *mm)
{
if(mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask)) {
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask)) {
xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
@@ -176,9 +225,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
local_flush_cache_range(vma, start, end);
}
@@ -190,9 +240,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
local_flush_tlb_range(vma, start, end);
}
@@ -203,9 +254,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if(mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
local_flush_cache_page(vma, page);
}
@@ -216,19 +268,15 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if(mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
local_flush_tlb_page(vma, page);
}
}
-void smp_reschedule_irq(void)
-{
- set_need_resched();
-}
-
void smp_flush_page_to_ram(unsigned long page)
{
/* Current theory is that those who call this are the one's
@@ -245,9 +293,10 @@ void smp_flush_page_to_ram(unsigned long page)
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
- cpumask_t cpu_mask = *mm_cpumask(mm);
- cpu_clear(smp_processor_id(), cpu_mask);
- if (!cpus_empty(cpu_mask))
+ cpumask_t cpu_mask;
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+ if (!cpumask_empty(&cpu_mask))
xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
local_flush_sig_insns(mm, insn_addr);
}
@@ -401,7 +450,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
};
if (!ret) {
- cpu_set(cpu, smp_commenced_mask);
+ cpumask_set_cpu(cpu, &smp_commenced_mask);
while (!cpu_online(cpu))
mb();
}
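The new generic entry points above depend on CPU-model specific IPI primitives (smp_ipi_resched, smp_ipi_single, smp_ipi_mask_one). Presumably each platform's SMP code registers its own implementations via BTFIXUP; the sun4m_* names below are assumptions for illustration, not taken from this hunk:

	/* e.g. from the platform's SMP init code: */
	BTFIXUPSET_CALL(smp_ipi_resched, sun4m_ipi_resched, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_single, sun4m_ipi_single, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_mask_one, sun4m_ipi_mask_one, BTFIXUPCALL_NORM);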
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 555a76d1f4a1..99cb17251bb5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -121,11 +121,11 @@ void __cpuinit smp_callin(void)
/* inform the notifiers about the new cpu */
notify_cpu_starting(cpuid);
- while (!cpu_isset(cpuid, smp_commenced_mask))
+ while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
rmb();
ipi_call_lock_irq();
- cpu_set(cpuid, cpu_online_map);
+ set_cpu_online(cpuid, true);
ipi_call_unlock_irq();
/* idle thread is expected to have preempt disabled */
@@ -189,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
+ unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
@@ -208,10 +208,8 @@ void smp_synchronize_tick_client(void)
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
+ if (delta == 0)
done = 1; /* let's lock on to this... */
- bound = rt;
- }
if (!done) {
if (i > 0) {
@@ -787,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
/* Send cross call to all processors mentioned in MASK_P
* except self. Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
*/
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
@@ -799,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
- smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+ smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
extern unsigned long xcall_sync_tick;
@@ -807,7 +805,7 @@ extern unsigned long xcall_sync_tick;
static void smp_start_sync_tick_client(int cpu)
{
xcall_deliver((u64) &xcall_sync_tick, 0, 0,
- &cpumask_of_cpu(cpu));
+ cpumask_of(cpu));
}
extern unsigned long xcall_call_function;
@@ -822,7 +820,7 @@ extern unsigned long xcall_call_function_single;
void arch_send_call_function_single_ipi(int cpu)
{
xcall_deliver((u64) &xcall_call_function_single, 0, 0,
- &cpumask_of_cpu(cpu));
+ cpumask_of(cpu));
}
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -920,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
}
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, &cpumask_of_cpu(cpu));
+ (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
@@ -933,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
void *pg_addr;
- int this_cpu;
u64 data0;
if (tlb_type == hypervisor)
return;
- this_cpu = get_cpu();
+ preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
@@ -957,14 +954,14 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
}
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, &cpu_online_map);
+ (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
}
__local_flush_dcache_page(page);
- put_cpu();
+ preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
@@ -1200,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
for_each_present_cpu(i) {
unsigned int j;
- cpus_clear(cpu_core_map[i]);
+ cpumask_clear(&cpu_core_map[i]);
if (cpu_data(i).core_id == 0) {
- cpu_set(i, cpu_core_map[i]);
+ cpumask_set_cpu(i, &cpu_core_map[i]);
continue;
}
for_each_present_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
- cpu_set(j, cpu_core_map[i]);
+ cpumask_set_cpu(j, &cpu_core_map[i]);
}
}
for_each_present_cpu(i) {
unsigned int j;
- cpus_clear(per_cpu(cpu_sibling_map, i));
+ cpumask_clear(&per_cpu(cpu_sibling_map, i));
if (cpu_data(i).proc_id == -1) {
- cpu_set(i, per_cpu(cpu_sibling_map, i));
+ cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
continue;
}
for_each_present_cpu(j) {
if (cpu_data(i).proc_id ==
cpu_data(j).proc_id)
- cpu_set(j, per_cpu(cpu_sibling_map, i));
+ cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
}
}
}
@@ -1235,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
int ret = smp_boot_one_cpu(cpu);
if (!ret) {
- cpu_set(cpu, smp_commenced_mask);
- while (!cpu_isset(cpu, cpu_online_map))
+ cpumask_set_cpu(cpu, &smp_commenced_mask);
+ while (!cpu_online(cpu))
mb();
- if (!cpu_isset(cpu, cpu_online_map)) {
+ if (!cpu_online(cpu)) {
ret = -ENODEV;
} else {
/* On SUN4V, writes to %tick and %stick are
@@ -1272,7 +1269,7 @@ void cpu_play_dead(void)
tb->nonresum_mondo_pa, 0);
}
- cpu_clear(cpu, smp_commenced_mask);
+ cpumask_clear_cpu(cpu, &smp_commenced_mask);
membar_safe("#Sync");
local_irq_disable();
@@ -1293,13 +1290,13 @@ int __cpu_disable(void)
cpuinfo_sparc *c;
int i;
- for_each_cpu_mask(i, cpu_core_map[cpu])
- cpu_clear(cpu, cpu_core_map[i]);
- cpus_clear(cpu_core_map[cpu]);
+ for_each_cpu(i, &cpu_core_map[cpu])
+ cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+ cpumask_clear(&cpu_core_map[cpu]);
- for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
- cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
- cpus_clear(per_cpu(cpu_sibling_map, cpu));
+ for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+ cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+ cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
c = &cpu_data(cpu);
@@ -1316,7 +1313,7 @@ int __cpu_disable(void)
local_irq_disable();
ipi_call_lock();
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
ipi_call_unlock();
cpu_map_rebuild();
@@ -1330,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
for (i = 0; i < 100; i++) {
smp_rmb();
- if (!cpu_isset(cpu, smp_commenced_mask))
+ if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
break;
msleep(100);
}
- if (cpu_isset(cpu, smp_commenced_mask)) {
+ if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
} else {
#if defined(CONFIG_SUN_LDOMS)
@@ -1344,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
do {
hv_err = sun4v_cpu_stop(cpu);
if (hv_err == HV_EOK) {
- cpu_clear(cpu, cpu_present_map);
+ set_cpu_present(cpu, false);
break;
}
} while (--limit > 0);
@@ -1365,12 +1362,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
void smp_send_reschedule(int cpu)
{
xcall_deliver((u64) &xcall_receive_signal, 0, 0,
- &cpumask_of_cpu(cpu));
+ cpumask_of(cpu));
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ scheduler_ipi();
}
/* This is a nop because we capture all other cpus
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 892fb884910a..f6bf25a2ff80 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -1,5 +1,5 @@
-/* sun4c_irq.c
- * arch/sparc/kernel/sun4c_irq.c:
+/*
+ * sun4c irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,31 +9,41 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include "irq.h"
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/traps.h>
+#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
+
+#include "irq.h"
+
+/* Sun4c interrupts are typically laid out as follows:
+ *
+ * 1 - Software interrupt, SBUS level 1
+ * 2 - SBUS level 2
+ * 3 - ESP SCSI, SBUS level 3
+ * 4 - Software interrupt
+ * 5 - Lance ethernet, SBUS level 4
+ * 6 - Software interrupt
+ * 7 - Graphics card, SBUS level 5
+ * 8 - SBUS level 6
+ * 9 - SBUS level 7
+ * 10 - Counter timer
+ * 11 - Floppy
+ * 12 - Zilog uart
+ * 13 - CS4231 audio
+ * 14 - Profiling timer
+ * 15 - NMI
+ *
+ * The interrupt enable bits in the interrupt mask register are
+ * really only used to enable/disable the timer interrupts, and
+ * for signalling software interrupts. There is also a master
+ * interrupt enable bit in this register.
+ *
+ * Interrupts are enabled by setting the SUN4C_INT_* bits, they
+ * are disabled by clearing those bits.
+ */
/*
* Bit field defines for the interrupt registers on various
@@ -49,73 +59,100 @@
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
-/* Pointer to the interrupt enable byte
- *
- * Dave Redman (djhr@tadpole.co.uk)
- * What you may not be aware of is that entry.S requires this variable.
- *
- * --- linux_trap_nmi_sun4c --
- *
- * so don't go making it static, like I tried. sigh.
+/*
+ * Pointer to the interrupt enable byte
+ * Used by entry.S
*/
-unsigned char __iomem *interrupt_enable = NULL;
+unsigned char __iomem *interrupt_enable;
-static void sun4c_disable_irq(unsigned int irq_nr)
+static void sun4c_mask_irq(struct irq_data *data)
{
- unsigned long flags;
- unsigned char current_mask, new_mask;
-
- local_irq_save(flags);
- irq_nr &= (NR_IRQS - 1);
- current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
- case 1:
- new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
- break;
- case 8:
- new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
- break;
- case 10:
- new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
- break;
- case 14:
- new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
- break;
- default:
+ unsigned long mask = (unsigned long)data->chip_data;
+
+ if (mask) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mask = sbus_readb(interrupt_enable) & ~mask;
+ sbus_writeb(mask, interrupt_enable);
local_irq_restore(flags);
- return;
}
- sbus_writeb(new_mask, interrupt_enable);
- local_irq_restore(flags);
}
-static void sun4c_enable_irq(unsigned int irq_nr)
+static void sun4c_unmask_irq(struct irq_data *data)
{
- unsigned long flags;
- unsigned char current_mask, new_mask;
-
- local_irq_save(flags);
- irq_nr &= (NR_IRQS - 1);
- current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
- case 1:
- new_mask = ((current_mask) | SUN4C_INT_E1);
- break;
- case 8:
- new_mask = ((current_mask) | SUN4C_INT_E8);
- break;
- case 10:
- new_mask = ((current_mask) | SUN4C_INT_E10);
- break;
- case 14:
- new_mask = ((current_mask) | SUN4C_INT_E14);
- break;
- default:
+ unsigned long mask = (unsigned long)data->chip_data;
+
+ if (mask) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mask = sbus_readb(interrupt_enable) | mask;
+ sbus_writeb(mask, interrupt_enable);
local_irq_restore(flags);
- return;
}
- sbus_writeb(new_mask, interrupt_enable);
- local_irq_restore(flags);
+}
+
+static unsigned int sun4c_startup_irq(struct irq_data *data)
+{
+ irq_link(data->irq);
+ sun4c_unmask_irq(data);
+
+ return 0;
+}
+
+static void sun4c_shutdown_irq(struct irq_data *data)
+{
+ sun4c_mask_irq(data);
+ irq_unlink(data->irq);
+}
+
+static struct irq_chip sun4c_irq = {
+ .name = "sun4c",
+ .irq_startup = sun4c_startup_irq,
+ .irq_shutdown = sun4c_shutdown_irq,
+ .irq_mask = sun4c_mask_irq,
+ .irq_unmask = sun4c_unmask_irq,
+};
+
+static unsigned int sun4c_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ unsigned int irq;
+
+ if (real_irq >= 16) {
+ prom_printf("Bogus sun4c IRQ %u\n", real_irq);
+ prom_halt();
+ }
+
+ irq = irq_alloc(real_irq, real_irq);
+ if (irq) {
+ unsigned long mask = 0UL;
+
+ switch (real_irq) {
+ case 1:
+ mask = SUN4C_INT_E1;
+ break;
+ case 8:
+ mask = SUN4C_INT_E8;
+ break;
+ case 10:
+ mask = SUN4C_INT_E10;
+ break;
+ case 14:
+ mask = SUN4C_INT_E14;
+ break;
+ default:
+ /* All the rest are either always enabled,
+ * or are for signalling software interrupts.
+ */
+ break;
+ }
+ irq_set_chip_and_handler_name(irq, &sun4c_irq,
+ handle_level_irq, "level");
+ irq_set_chip_data(irq, (void *)mask);
+ }
+ return irq;
}
struct sun4c_timer_info {
@@ -139,8 +176,9 @@ static void sun4c_load_profile_irq(int cpu, unsigned int limit)
static void __init sun4c_init_timers(irq_handler_t counter_fn)
{
- const struct linux_prom_irqs *irq;
+ const struct linux_prom_irqs *prom_irqs;
struct device_node *dp;
+ unsigned int irq;
const u32 *addr;
int err;
@@ -158,9 +196,9 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
sun4c_timers = (void __iomem *) (unsigned long) addr[0];
- irq = of_get_property(dp, "intr", NULL);
+ prom_irqs = of_get_property(dp, "intr", NULL);
of_node_put(dp);
- if (!irq) {
+ if (!prom_irqs) {
prom_printf("sun4c_init_timers: No intr property\n");
prom_halt();
}
@@ -173,19 +211,21 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
master_l10_counter = &sun4c_timers->l10_count;
- err = request_irq(irq[0].pri, counter_fn,
- (IRQF_DISABLED | SA_STATIC_ALLOC),
- "timer", NULL);
+ irq = sun4c_build_device_irq(NULL, prom_irqs[0].pri);
+ err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
prom_halt();
}
-
- sun4c_disable_irq(irq[1].pri);
+
+ /* disable timer interrupt */
+ sun4c_mask_irq(irq_get_irq_data(irq));
}
#ifdef CONFIG_SMP
-static void sun4c_nop(void) {}
+static void sun4c_nop(void)
+{
+}
#endif
void __init sun4c_init_IRQ(void)
@@ -208,13 +248,12 @@ void __init sun4c_init_IRQ(void)
interrupt_enable = (void __iomem *) (unsigned long) addr[0];
- BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
- sparc_init_timers = sun4c_init_timers;
+
+ sparc_irq_config.init_timers = sun4c_init_timers;
+ sparc_irq_config.build_device_irq = sun4c_build_device_irq;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index e11b4612dabb..a9ea60eb2c10 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,50 +1,41 @@
/*
- * arch/sparc/kernel/sun4d_irq.c:
- * SS1000/SC2000 interrupt handling.
+ * SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
+
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
+#include <asm/setup.h>
#include "kernel.h"
#include "irq.h"
-/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
-/* #define DISTRIBUTE_IRQS */
+/* Sun4d interrupts fall roughly into two categories: SBUS and
+ * cpu local. CPU local interrupts cover the timer interrupts
+ * and whatnot, and we encode those as normal PILs between
+ * 0 and 15.
+ * SBUS interrupts are encoded as a combination of board, level and slot.
+ */
+
+struct sun4d_handler_data {
+ unsigned int cpuid; /* target cpu */
+ unsigned int real_irq; /* interrupt level */
+};
+
+
+static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
+{
+ return (board + 1) << 5 | (lvl << 2) | slot;
+}
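/* Illustrative arithmetic, not part of the patch: an interrupt from board 2,
 * SBUS level 3, slot 1 encodes to ((2 + 1) << 5) | (3 << 2) | 1 = 0x6d (109).
 */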
struct sun4d_timer_regs {
u32 l10_timer_limit;
@@ -56,320 +47,205 @@ struct sun4d_timer_regs {
static struct sun4d_timer_regs __iomem *sun4d_timers;
-#define TIMER_IRQ 10
-
-#define MAX_STATIC_ALLOC 4
-extern int static_irq_count;
-static unsigned char sbus_tid[32];
+#define SUN4D_TIMER_IRQ 10
-static struct irqaction *irq_action[NR_IRQS];
-extern spinlock_t irq_action_lock;
-
-static struct sbus_action {
- struct irqaction *action;
- /* For SMP this needs to be extended */
-} *sbus_actions;
+/* Specify which cpu handles interrupts from which board.
+ * Index is board - value is cpu.
+ */
+static unsigned char board_to_cpu[32];
static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
+ 0,
+ 0,
+ 1,
+ 2,
+ 0,
+ 3,
+ 0,
+ 4,
+ 0,
+ 5,
+ 0,
+ 6,
+ 0,
+ 7,
+ 0,
+ 0,
};
-static int sbus_to_pil[] = {
- 0, 2, 3, 5, 7, 9, 11, 13,
-};
-
-static int nsbi;
-
/* Exported for sun4d_smp.c */
DEFINE_SPINLOCK(sun4d_imsk_lock);
-int show_sun4d_interrupts(struct seq_file *p, void *v)
+/* SBUS interrupts are encoded integers including the board number
+ * (plus one), the SBUS level, and the SBUS slot number. Sun4D
+ * IRQ dispatch is done by:
+ *
+ * 1) Reading the BW local interrupt table in order to get the bus
+ * interrupt mask.
+ *
+ * This table is indexed by SBUS interrupt level which can be
+ * derived from the PIL we got interrupted on.
+ *
+ * 2) For each bus showing interrupt pending from #1, read the
+ * SBI interrupt state register. This will indicate which slots
+ * have interrupts pending for that SBUS interrupt level.
+ *
+ * 3) Call the generic IRQ support.
+ */
+static void sun4d_sbus_handler_irq(int sbusl)
{
- int i = *(loff_t *) v, j = 0, k = 0, sbusl;
- struct irqaction * action;
- unsigned long flags;
-#ifdef CONFIG_SMP
- int x;
-#endif
-
- spin_lock_irqsave(&irq_action_lock, flags);
- if (i < NR_IRQS) {
- sbusl = pil_to_sbus[i];
- if (!sbusl) {
- action = *(i + irq_action);
- if (!action)
- goto out_unlock;
- } else {
- for (j = 0; j < nsbi; j++) {
- for (k = 0; k < 4; k++)
- if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
- goto found_it;
- }
- goto out_unlock;
- }
-found_it: seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(x)
- seq_printf(p, "%10u ",
- kstat_cpu(cpu_logical_map(x)).irqs[i]);
-#endif
- seq_printf(p, "%c %s",
- (action->flags & IRQF_DISABLED) ? '+' : ' ',
- action->name);
- action = action->next;
- for (;;) {
- for (; action; action = action->next) {
- seq_printf(p, ",%s %s",
- (action->flags & IRQF_DISABLED) ? " +" : "",
- action->name);
- }
- if (!sbusl) break;
- k++;
- if (k < 4)
- action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
- else {
- j++;
- if (j == nsbi) break;
- k = 0;
- action = sbus_actions [(j << 5) + (sbusl << 2)].action;
+ unsigned int bus_mask;
+ unsigned int sbino, slot;
+ unsigned int sbil;
+
+ bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
+ bw_clear_intr_mask(sbusl, bus_mask);
+
+ sbil = (sbusl << 2);
+ /* Loop for each pending SBI */
+ for (sbino = 0; bus_mask; sbino++) {
+ unsigned int idx, mask;
+
+ bus_mask >>= 1;
+ if (!(bus_mask & 1))
+ continue;
+ /* XXX This seems to ACK the irq twice. acquire_sbi()
+ * XXX uses swap, therefore this writes 0xf << sbil,
+ * XXX then later release_sbi() will write the individual
+ * XXX bits which were set again.
+ */
+ mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
+ mask &= (0xf << sbil);
+
+ /* Loop for each pending SBI slot */
+ idx = 0;
+ slot = (1 << sbil);
+ while (mask != 0) {
+ unsigned int pil;
+ struct irq_bucket *p;
+
+ idx++;
+ slot <<= 1;
+ if (!(mask & slot))
+ continue;
+
+ mask &= ~slot;
+ pil = sun4d_encode_irq(sbino, sbil, idx);
+
+ p = irq_map[pil];
+ while (p) {
+ struct irq_bucket *next;
+
+ next = p->next;
+ generic_handle_irq(p->irq);
+ p = next;
}
+ release_sbi(SBI2DEVID(sbino), slot);
}
- seq_putc(p, '\n');
}
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
- return 0;
}
-void sun4d_free_irq(unsigned int irq, void *dev_id)
-{
- struct irqaction *action, **actionp;
- struct irqaction *tmp = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&irq_action_lock, flags);
- if (irq < 15)
- actionp = irq + irq_action;
- else
- actionp = &(sbus_actions[irq - (1 << 5)].action);
- action = *actionp;
- if (!action) {
- printk("Trying to free free IRQ%d\n",irq);
- goto out_unlock;
- }
- if (dev_id) {
- for (; action; action = action->next) {
- if (action->dev_id == dev_id)
- break;
- tmp = action;
- }
- if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
- goto out_unlock;
- }
- } else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
- goto out_unlock;
- }
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
- * so it is a bad idea to free it.
- */
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
- irq, action->name);
- goto out_unlock;
- }
-
- if (tmp)
- tmp->next = action->next;
- else
- *actionp = action->next;
-
- spin_unlock_irqrestore(&irq_action_lock, flags);
-
- synchronize_irq(irq);
-
- spin_lock_irqsave(&irq_action_lock, flags);
-
- kfree(action);
-
- if (!(*actionp))
- __disable_irq(irq);
-
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
-}
-
-extern void unexpected_irq(int, void *, struct pt_regs *);
-
-void sun4d_handler_irq(int irq, struct pt_regs * regs)
+void sun4d_handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
- int cpu = smp_processor_id();
/* SBUS IRQ level (1 - 7) */
- int sbusl = pil_to_sbus[irq];
-
+ int sbusl = pil_to_sbus[pil];
+
/* FIXME: Is this necessary?? */
cc_get_ipen();
-
- cc_set_iclr(1 << irq);
-
+
+ cc_set_iclr(1 << pil);
+
+#ifdef CONFIG_SMP
+ /*
+ * Check IPI data structures after IRQ has been cleared. Hard and Soft
+ * IRQ can happen at the same time, so both cases are always handled.
+ */
+ if (pil == SUN4D_IPI_IRQ)
+ sun4d_ipi_interrupt();
+#endif
+
old_regs = set_irq_regs(regs);
irq_enter();
- kstat_cpu(cpu).irqs[irq]++;
- if (!sbusl) {
- action = *(irq + irq_action);
- if (!action)
- unexpected_irq(irq, NULL, regs);
- do {
- action->handler(irq, action->dev_id);
- action = action->next;
- } while (action);
+ if (sbusl == 0) {
+ /* cpu interrupt */
+ struct irq_bucket *p;
+
+ p = irq_map[pil];
+ while (p) {
+ struct irq_bucket *next;
+
+ next = p->next;
+ generic_handle_irq(p->irq);
+ p = next;
+ }
} else {
- int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
- int sbino;
- struct sbus_action *actionp;
- unsigned mask, slot;
- int sbil = (sbusl << 2);
-
- bw_clear_intr_mask(sbusl, bus_mask);
-
- /* Loop for each pending SBI */
- for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
- if (bus_mask & 1) {
- mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
- mask &= (0xf << sbil);
- actionp = sbus_actions + (sbino << 5) + (sbil);
- /* Loop for each pending SBI slot */
- for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
- if (mask & slot) {
- mask &= ~slot;
- action = actionp->action;
-
- if (!action)
- unexpected_irq(irq, NULL, regs);
- do {
- action->handler(irq, action->dev_id);
- action = action->next;
- } while (action);
- release_sbi(SBI2DEVID(sbino), slot);
- }
- }
+ /* SBUS interrupt */
+ sun4d_sbus_handler_irq(sbusl);
}
irq_exit();
set_irq_regs(old_regs);
}
-int sun4d_request_irq(unsigned int irq,
- irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
+
+static void sun4d_mask_irq(struct irq_data *data)
{
- struct irqaction *action, *tmp = NULL, **actionp;
+ struct sun4d_handler_data *handler_data = data->handler_data;
+ unsigned int real_irq;
+#ifdef CONFIG_SMP
+ int cpuid = handler_data->cpuid;
unsigned long flags;
- int ret;
-
- if(irq > 14 && irq < (1 << 5)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!handler) {
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&irq_action_lock, flags);
-
- if (irq >= (1 << 5))
- actionp = &(sbus_actions[irq - (1 << 5)].action);
- else
- actionp = irq + irq_action;
- action = *actionp;
-
- if (action) {
- if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
- for (tmp = action; tmp->next; tmp = tmp->next);
- } else {
- ret = -EBUSY;
- goto out_unlock;
- }
- if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
- ret = -EBUSY;
- goto out_unlock;
- }
- action = NULL; /* Or else! */
- }
-
- /* If this is flagged as statically allocated then we use our
- * private struct which is never freed.
- */
- if (irqflags & SA_STATIC_ALLOC) {
- if (static_irq_count < MAX_STATIC_ALLOC)
- action = &static_irqaction[static_irq_count++];
- else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
- }
-
- if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- action->handler = handler;
- action->flags = irqflags;
- action->name = devname;
- action->next = NULL;
- action->dev_id = dev_id;
-
- if (tmp)
- tmp->next = action;
- else
- *actionp = action;
-
- __enable_irq(irq);
-
- ret = 0;
-out_unlock:
- spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
- return ret;
+#endif
+ real_irq = handler_data->real_irq;
+#ifdef CONFIG_SMP
+ spin_lock_irqsave(&sun4d_imsk_lock, flags);
+ cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq));
+ spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+ cc_set_imsk(cc_get_imsk() | (1 << real_irq));
+#endif
}
-static void sun4d_disable_irq(unsigned int irq)
+static void sun4d_unmask_irq(struct irq_data *data)
{
- int tid = sbus_tid[(irq >> 5) - 1];
+ struct sun4d_handler_data *handler_data = data->handler_data;
+ unsigned int real_irq;
+#ifdef CONFIG_SMP
+ int cpuid = handler_data->cpuid;
unsigned long flags;
-
- if (irq < NR_IRQS)
- return;
+#endif
+ real_irq = handler_data->real_irq;
+#ifdef CONFIG_SMP
spin_lock_irqsave(&sun4d_imsk_lock, flags);
- cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
+	cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+	cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
+#endif
}
-static void sun4d_enable_irq(unsigned int irq)
+static unsigned int sun4d_startup_irq(struct irq_data *data)
{
- int tid = sbus_tid[(irq >> 5) - 1];
- unsigned long flags;
-
- if (irq < NR_IRQS)
- return;
+ irq_link(data->irq);
+ sun4d_unmask_irq(data);
+ return 0;
+}
- spin_lock_irqsave(&sun4d_imsk_lock, flags);
- cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
- spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+static void sun4d_shutdown_irq(struct irq_data *data)
+{
+ sun4d_mask_irq(data);
+ irq_unlink(data->irq);
}
+struct irq_chip sun4d_irq = {
+ .name = "sun4d",
+ .irq_startup = sun4d_startup_irq,
+ .irq_shutdown = sun4d_shutdown_irq,
+ .irq_unmask = sun4d_unmask_irq,
+ .irq_mask = sun4d_mask_irq,
+};
+
#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
@@ -389,44 +265,6 @@ void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
-#ifdef DISTRIBUTE_IRQS
- cpumask_t sbus_serving_map;
-
- sbus_serving_map = cpu_present_map;
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
-
- if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else if (cpu_isset(board * 2, cpu_present_map))
- sbus_tid[board] = (board * 2);
- else if (cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else
- sbus_tid[board] = 0xff;
- if (sbus_tid[board] != 0xff)
- cpu_clear(sbus_tid[board], sbus_serving_map);
- }
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
- if (sbus_tid[board] == 0xff) {
- int i = 31;
-
- if (cpus_empty(sbus_serving_map))
- sbus_serving_map = cpu_present_map;
- while (cpu_isset(i, sbus_serving_map))
- i--;
- sbus_tid[board] = i;
- cpu_clear(i, sbus_serving_map);
- }
- }
- for_each_node_by_name(dp, "sbi") {
- int devid = of_getintprop_default(dp, "device-id", 0);
- int board = of_getintprop_default(dp, "board#", 0);
- printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
- set_sbi_tid(devid, sbus_tid[board] << 3);
- }
-#else
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
@@ -434,14 +272,13 @@ void __init sun4d_distribute_irqs(void)
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
- sbus_tid[board] = cpuid;
+ board_to_cpu[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
- printk("All sbus IRQs directed to CPU%d\n", cpuid);
-#endif
+ printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
}
#endif
-
+
static void sun4d_clear_clock_irq(void)
{
sbus_readl(&sun4d_timers->l10_timer_limit);
@@ -462,14 +299,83 @@ static void __init sun4d_load_profile_irqs(void)
}
}
+unsigned int sun4d_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ struct device_node *dp = op->dev.of_node;
+ struct device_node *io_unit, *sbi = dp->parent;
+ const struct linux_prom_registers *regs;
+ struct sun4d_handler_data *handler_data;
+ unsigned int pil;
+ unsigned int irq;
+ int board, slot;
+ int sbusl;
+
+ irq = 0;
+ while (sbi) {
+ if (!strcmp(sbi->name, "sbi"))
+ break;
+
+ sbi = sbi->parent;
+ }
+ if (!sbi)
+ goto err_out;
+
+ regs = of_get_property(dp, "reg", NULL);
+ if (!regs)
+ goto err_out;
+
+ slot = regs->which_io;
+
+ /*
+ * If SBI's parent is not io-unit or the io-unit lacks
+ * a "board#" property, something is very wrong.
+ */
+ if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
+ printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
+ goto err_out;
+ }
+ io_unit = sbi->parent;
+ board = of_getintprop_default(io_unit, "board#", -1);
+ if (board == -1) {
+ printk("%s: Error, lacks board# property.\n", io_unit->full_name);
+ goto err_out;
+ }
+
+ sbusl = pil_to_sbus[real_irq];
+ if (sbusl)
+ pil = sun4d_encode_irq(board, sbusl, slot);
+ else
+ pil = real_irq;
+
+ irq = irq_alloc(real_irq, pil);
+ if (irq == 0)
+ goto err_out;
+
+ handler_data = irq_get_handler_data(irq);
+ if (unlikely(handler_data))
+ goto err_out;
+
+ handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
+ prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
+ prom_halt();
+ }
+ handler_data->cpuid = board_to_cpu[board];
+ handler_data->real_irq = real_irq;
+ irq_set_chip_and_handler_name(irq, &sun4d_irq,
+ handle_level_irq, "level");
+ irq_set_handler_data(irq, handler_data);
+
+err_out:
+ return real_irq;
+}
+
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
- extern unsigned int real_irq_entry[], smp4d_ticker[];
- extern unsigned int patchme_maybe_smp_msg[];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
@@ -493,6 +399,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
{
struct device_node *dp;
struct resource res;
+ unsigned int irq;
const u32 *reg;
int err;
@@ -527,11 +434,11 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
master_l10_counter = &sun4d_timers->l10_cur_count;
- err = request_irq(TIMER_IRQ, counter_fn,
- (IRQF_DISABLED | SA_STATIC_ALLOC),
- "timer", NULL);
+ irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ);
+ err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
if (err) {
- prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err);
+ prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
+ err);
prom_halt();
}
sun4d_load_profile_irqs();
@@ -541,32 +448,22 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
void __init sun4d_init_sbi_irq(void)
{
struct device_node *dp;
- int target_cpu = 0;
+ int target_cpu;
-#ifdef CONFIG_SMP
target_cpu = boot_cpu_id;
-#endif
-
- nsbi = 0;
- for_each_node_by_name(dp, "sbi")
- nsbi++;
- sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
- if (!sbus_actions) {
- prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
- prom_halt();
- }
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
unsigned int mask;
set_sbi_tid(devid, target_cpu << 3);
- sbus_tid[board] = target_cpu;
+ board_to_cpu[board] = target_cpu;
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
- printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board);
+ printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
+ mask, board);
release_sbi(devid, mask);
}
}
@@ -576,11 +473,12 @@ void __init sun4d_init_IRQ(void)
{
local_irq_disable();
- BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4d_init_timers;
+
+ sparc_irq_config.init_timers = sun4d_init_timers;
+ sparc_irq_config.build_device_irq = sun4d_build_device_irq;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
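
As a worked example of the encoding introduced at the top of this file: board 2, SBUS level 3, slot 1 packs to (2 + 1) << 5 | (3 << 2) | 1 = 0x6d. Hypothetical decode helpers (not part of the patch) make the implied bit layout explicit:

/* Inverse of sun4d_encode_irq(): board+1 in bits 5 and up, SBUS level
 * in bits [4:2], slot in the low bits (assuming it fits in two bits).
 */
static inline int sun4d_irq_board(unsigned int irq)
{
	return (irq >> 5) - 1;
}

static inline int sun4d_irq_level(unsigned int irq)
{
	return (irq >> 2) & 0x7;
}

static inline int sun4d_irq_slot(unsigned int irq)
{
	return irq & 0x3;
}
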
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 482f2ab92692..133387980b56 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -1,4 +1,4 @@
-/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
+/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
@@ -6,59 +6,23 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-#include <asm/irq_regs.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
#include <asm/sbi.h>
+#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-#include <asm/cpudata.h>
+#include "kernel.h"
#include "irq.h"
-#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
+#define IRQ_CROSS_CALL 15
-static volatile int smp_processors_ready = 0;
+static volatile int smp_processors_ready;
static int smp_highest_cpu;
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern cpuinfo_sparc cpu_data[NR_CPUS];
-extern unsigned char boot_cpu_id;
-extern volatile int smp_process_available;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4d_processor_id(void);
-
-/* #define SMP_DEBUG */
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
@@ -68,9 +32,8 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
return val;
}
+static void smp4d_ipi_init(void);
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
-extern void sun4d_distribute_irqs(void);
static unsigned char cpu_leds[32];
@@ -86,9 +49,8 @@ static inline void show_leds(int cpuid)
void __cpuinit smp4d_callin(void)
{
int cpuid = hard_smp4d_processor_id();
- extern spinlock_t sun4d_imsk_lock;
unsigned long flags;
-
+
/* Show we are alive */
cpu_leds[cpuid] = 0x6;
show_leds(cpuid);
@@ -118,15 +80,13 @@ void __cpuinit smp4d_callin(void)
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
-
- cpu_probe();
- while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
+ while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
-
- while(current_set[cpuid]->cpu != cpuid)
+
+ while (current_set[cpuid]->cpu != cpuid)
barrier();
-
+
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
: : "r" (&current_set[cpuid])
@@ -134,17 +94,17 @@ void __cpuinit smp4d_callin(void)
cpu_leds[cpuid] = 0x9;
show_leds(cpuid);
-
+
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
local_flush_cache_all();
local_flush_tlb_all();
-
+
local_irq_enable(); /* We don't allow PIL 14 yet */
-
- while (!cpu_isset(cpuid, smp_commenced_mask))
+
+ while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
barrier();
spin_lock_irqsave(&sun4d_imsk_lock, flags);
@@ -154,17 +114,12 @@ void __cpuinit smp4d_callin(void)
}
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4d_boot_cpus(void)
{
+ smp4d_ipi_init();
if (boot_cpu_id)
current_set[0] = NULL;
smp_setup_percpu_timer();
@@ -173,43 +128,42 @@ void __init smp4d_boot_cpus(void)
int __cpuinit smp4d_boot_one_cpu(int i)
{
- extern unsigned long sun4d_cpu_startup;
- unsigned long *entry = &sun4d_cpu_startup;
- struct task_struct *p;
- int timeout;
- int cpu_node;
+ unsigned long *entry = &sun4d_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int cpu_node;
- cpu_find_by_instance(i, &cpu_node,NULL);
- /* Cook up an idler for this guy. */
- p = fork_idle(i);
- current_set[i] = task_thread_info(p);
+ cpu_find_by_instance(i, &cpu_node, NULL);
+ /* Cook up an idler for this guy. */
+ p = fork_idle(i);
+ current_set[i] = task_thread_info(p);
+
+ /*
+ * Initialize the contexts table
+ * Since the call to prom_startcpu() trashes the structure,
+ * we need to re-initialize it for each cpu
+ */
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
+ local_flush_cache_all();
+ prom_startcpu(cpu_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ printk(KERN_INFO "prom_startcpu returned :)\n");
+
+ /* wheee... it's going... */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
+ break;
+ udelay(200);
+ }
- /*
- * Initialize the contexts table
- * Since the call to prom_startcpu() trashes the structure,
- * we need to re-initialize it for each cpu
- */
- smp_penguin_ctable.which_io = 0;
- smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- smp_penguin_ctable.reg_size = 0;
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- SMP_PRINTK(("Starting CPU %d at %p\n", i, entry));
- local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
-
- SMP_PRINTK(("prom_startcpu returned :)\n"));
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(200);
- }
-
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -237,6 +191,80 @@ void __init smp4d_smp_done(void)
sun4d_distribute_irqs();
}
+/* Memory structure giving the interrupt handler information about generated IPIs */
+struct sun4d_ipi_work {
+ int single;
+ int msk;
+ int resched;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);
+
+/* Initialize IPIs on the SUN4D SMP machine */
+static void __init smp4d_ipi_init(void)
+{
+ int cpu;
+ struct sun4d_ipi_work *work;
+
+ printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);
+
+ for_each_possible_cpu(cpu) {
+ work = &per_cpu(sun4d_ipi_work, cpu);
+ work->single = work->msk = work->resched = 0;
+ }
+}
+
+void sun4d_ipi_interrupt(void)
+{
+ struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
+
+ if (work->single) {
+ work->single = 0;
+ smp_call_function_single_interrupt();
+ }
+ if (work->msk) {
+ work->msk = 0;
+ smp_call_function_interrupt();
+ }
+ if (work->resched) {
+ work->resched = 0;
+ smp_resched_interrupt();
+ }
+}
+
+static void smp4d_ipi_single(int cpu)
+{
+ struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+ /* Mark work */
+ work->single = 1;
+
+ /* Generate IRQ on the CPU */
+ sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
+static void smp4d_ipi_mask_one(int cpu)
+{
+ struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+ /* Mark work */
+ work->msk = 1;
+
+ /* Generate IRQ on the CPU */
+ sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
+static void smp4d_ipi_resched(int cpu)
+{
+ struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+ /* Mark work */
+ work->resched = 1;
+
+ /* Generate IRQ on the CPU (any IRQ will cause resched) */
+ sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
static struct smp_funcall {
smpfunc_t func;
unsigned long arg1;
@@ -255,14 +283,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
- if(smp_processors_ready) {
+ if (smp_processors_ready) {
register int high = smp_highest_cpu;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
- /* If you make changes here, make sure gcc generates proper code... */
+ /*
+ * If you make changes here, make sure
+ * gcc generates proper code...
+ */
register smpfunc_t f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
@@ -282,10 +313,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
register int i;
- cpu_clear(smp_processor_id(), mask);
- cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i <= high; i++) {
- if (cpu_isset(i, mask)) {
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+ cpumask_and(&mask, cpu_online_mask, &mask);
+ for (i = 0; i <= high; i++) {
+ if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4d_send_ipi(i, IRQ_CROSS_CALL);
@@ -298,19 +329,19 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
@@ -336,7 +367,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
old_regs = set_irq_regs(regs);
- bw_get_prof_limit(cpu);
+ bw_get_prof_limit(cpu);
bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
cpu_tick[cpu]++;
@@ -349,7 +380,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -361,8 +392,6 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
@@ -374,16 +403,16 @@ static void __cpuinit smp_setup_percpu_timer(void)
void __init smp4d_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
- addr[1] = 0x01000000; /* nop */
- addr[2] = 0x01000000; /* nop */
+ addr[1] = 0x01000000; /* nop */
+ addr[2] = 0x01000000; /* nop */
}
void __init smp4d_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
addr[4] = 0x01000000; /* nop */
@@ -392,17 +421,19 @@ void __init smp4d_blackbox_current(unsigned *addr)
void __init sun4d_init_smp(void)
{
int i;
- extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
-
+
/* And set btfixup... */
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
-
+ BTFIXUPSET_CALL(smp_ipi_resched, smp4d_ipi_resched, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_single, smp4d_ipi_single, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_mask_one, smp4d_ipi_mask_one, BTFIXUPCALL_NORM);
+
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
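
The IPI machinery added above is a per-cpu mailbox: the sender sets a flag in sun4d_ipi_work for the target cpu and raises SUN4D_IPI_IRQ; the receiver, invoked from sun4d_handler_irq(), clears whichever flags it finds set. A compressed sketch of the same pattern, with illustrative names:

/* Illustrative mailbox IPI in the style of sun4d_ipi_work. */
struct ipi_mailbox {
	int resched;			/* reschedule request pending */
};
static DEFINE_PER_CPU(struct ipi_mailbox, ipi_mailbox);

static void example_send_resched(int cpu)
{
	per_cpu(ipi_mailbox, cpu).resched = 1;	/* mark work first...   */
	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);	/* ...then poke the cpu */
}

static void example_handle_ipi(void)		/* runs on the target cpu */
{
	struct ipi_mailbox *mb = &__get_cpu_var(ipi_mailbox);

	if (mb->resched) {
		mb->resched = 0;
		smp_resched_interrupt();
	}
}
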
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 7f3b97ff62c1..422c16dad1f6 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -1,5 +1,5 @@
-/* sun4m_irq.c
- * arch/sparc/kernel/sun4m_irq.c:
+/*
+ * sun4m irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,101 +9,44 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "irq.h"
+#include "kernel.h"
-struct sun4m_irq_percpu {
- u32 pending;
- u32 clear;
- u32 set;
-};
-
-struct sun4m_irq_global {
- u32 pending;
- u32 mask;
- u32 mask_clear;
- u32 mask_set;
- u32 interrupt_target;
-};
-
-/* Code in entry.S needs to get at these register mappings. */
-struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
-struct sun4m_irq_global __iomem *sun4m_irq_global;
-
-/* Dave Redman (djhr@tadpole.co.uk)
- * The sun4m interrupt registers.
- */
-#define SUN4M_INT_ENABLE 0x80000000
-#define SUN4M_INT_E14 0x00000080
-#define SUN4M_INT_E10 0x00080000
-
-#define SUN4M_HARD_INT(x) (0x000000001 << (x))
-#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
-
-#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
-#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
-#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
-#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
-#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
-#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
-#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
-#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
-#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
-#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
-#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
-#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
-#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
-#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
-#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
-#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
-
-#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
- SUN4M_INT_M2S_WRITE_ERR | \
- SUN4M_INT_ECC_ERR | \
- SUN4M_INT_VME_ERR)
-
-#define SUN4M_INT_SBUS(x) (1 << (x+7))
-#define SUN4M_INT_VME(x) (1 << (x))
-
-/* Interrupt levels used by OBP */
-#define OBP_INT_LEVEL_SOFT 0x10
-#define OBP_INT_LEVEL_ONBOARD 0x20
-#define OBP_INT_LEVEL_SBUS 0x30
-#define OBP_INT_LEVEL_VME 0x40
-
-/* Interrupt level assignment on sun4m:
+/* Sample sun4m IRQ layout:
+ *
+ * 0x22 - Power
+ * 0x24 - ESP SCSI
+ * 0x26 - Lance ethernet
+ * 0x2b - Floppy
+ * 0x2c - Zilog uart
+ * 0x32 - SBUS level 0
+ * 0x33 - Parallel port, SBUS level 1
+ * 0x35 - SBUS level 2
+ * 0x37 - SBUS level 3
+ * 0x39 - Audio, Graphics card, SBUS level 4
+ * 0x3b - SBUS level 5
+ * 0x3d - SBUS level 6
+ *
+ * Each interrupt source has a mask bit in the interrupt registers.
+ * When the mask bit is set, this blocks interrupt delivery. So you
+ * clear the bit to enable the interrupt.
+ *
+ * Interrupts numbered less than 0x10 are software triggered interrupts
+ * and unused by Linux.
+ *
+ * Interrupt level assignment on sun4m:
*
* level source
* ------------------------------------------------------------
- * 1 softint-1
+ * 1 softint-1
* 2 softint-2, VME/SBUS level 1
* 3 softint-3, VME/SBUS level 2
* 4 softint-4, onboard SCSI
@@ -138,10 +81,10 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and
* Tadpole S3 GX systems.
*
- * esp: 0x24 onboard ESP SCSI
- * le: 0x26 onboard Lance ETHERNET
+ * esp: 0x24 onboard ESP SCSI
+ * le: 0x26 onboard Lance ETHERNET
* p9100: 0x32 SBUS level 1 P9100 video
- * bpp: 0x33 SBUS level 2 BPP parallel port device
+ * bpp: 0x33 SBUS level 2 BPP parallel port device
* DBRI: 0x39 SBUS level 5 DBRI ISDN audio
* SUNW,leo: 0x39 SBUS level 5 LEO video
* pcmcia: 0x3b SBUS level 6 PCMCIA controller
@@ -152,8 +95,62 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* power: 0x22 onboard power device (XXX unknown mask bit XXX)
*/
-static unsigned long irq_mask[0x50] = {
- /* SMP */
+
+/* Code in entry.S needs to get at these register mappings. */
+struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+struct sun4m_handler_data {
+ bool percpu;
+ long mask;
+};
+
+/* Dave Redman (djhr@tadpole.co.uk)
+ * The sun4m interrupt registers.
+ */
+#define SUN4M_INT_ENABLE 0x80000000
+#define SUN4M_INT_E14 0x00000080
+#define SUN4M_INT_E10 0x00080000
+
+#define SUN4M_HARD_INT(x) (0x000000001 << (x))
+#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
+
+#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
+#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
+#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
+#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
+#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
+#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
+#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
+#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
+#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
+#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
+#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
+#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
+#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
+#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
+#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
+#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
+
+#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
+ SUN4M_INT_M2S_WRITE_ERR | \
+ SUN4M_INT_ECC_ERR | \
+ SUN4M_INT_VME_ERR)
+
+#define SUN4M_INT_SBUS(x) (1 << (x+7))
+#define SUN4M_INT_VME(x) (1 << (x))
+
+/* Interrupt levels used by OBP */
+#define OBP_INT_LEVEL_SOFT 0x10
+#define OBP_INT_LEVEL_ONBOARD 0x20
+#define OBP_INT_LEVEL_SBUS 0x30
+#define OBP_INT_LEVEL_VME 0x40
+
+#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
+#define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
+
+static unsigned long sun4m_imask[0x50] = {
+ /* 0x00 - SMP */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -162,7 +159,7 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* soft */
+ /* 0x10 - soft */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -171,122 +168,129 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* onboard */
+ /* 0x20 - onboard */
0, 0, 0, 0,
SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
- SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR,
- /* sbus */
+ SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR,
+ /* 0x30 - sbus */
0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
0, SUN4M_INT_SBUS(6), 0, 0,
- /* vme */
+ /* 0x40 - vme */
0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
0, SUN4M_INT_VME(6), 0, 0
};
-static unsigned long sun4m_get_irqmask(unsigned int irq)
-{
- unsigned long mask;
-
- if (irq < 0x50)
- mask = irq_mask[irq];
- else
- mask = 0;
-
- if (!mask)
- printk(KERN_ERR "sun4m_get_irqmask: IRQ%d has no valid mask!\n",
- irq);
-
- return mask;
-}
-
-static void sun4m_disable_irq(unsigned int irq_nr)
+static void sun4m_mask_irq(struct irq_data *data)
{
- unsigned long mask, flags;
+ struct sun4m_handler_data *handler_data = data->handler_data;
int cpu = smp_processor_id();
- mask = sun4m_get_irqmask(irq_nr);
- local_irq_save(flags);
- if (irq_nr > 15)
- sbus_writel(mask, &sun4m_irq_global->mask_set);
- else
- sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
- local_irq_restore(flags);
+ if (handler_data->mask) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (handler_data->percpu) {
+ sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
+ } else {
+ sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set);
+ }
+ local_irq_restore(flags);
+ }
}
-static void sun4m_enable_irq(unsigned int irq_nr)
+static void sun4m_unmask_irq(struct irq_data *data)
{
- unsigned long mask, flags;
+ struct sun4m_handler_data *handler_data = data->handler_data;
int cpu = smp_processor_id();
- /* Dreadful floppy hack. When we use 0x2b instead of
- * 0x0b the system blows (it starts to whistle!).
- * So we continue to use 0x0b. Fixme ASAP. --P3
- */
- if (irq_nr != 0x0b) {
- mask = sun4m_get_irqmask(irq_nr);
- local_irq_save(flags);
- if (irq_nr > 15)
- sbus_writel(mask, &sun4m_irq_global->mask_clear);
- else
- sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
- local_irq_restore(flags);
- } else {
+ if (handler_data->mask) {
+ unsigned long flags;
+
local_irq_save(flags);
- sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear);
+ if (handler_data->percpu) {
+ sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
+ } else {
+ sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear);
+ }
local_irq_restore(flags);
}
}
-static unsigned long cpu_pil_to_imask[16] = {
-/*0*/ 0x00000000,
-/*1*/ 0x00000000,
-/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0),
-/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1),
-/*4*/ SUN4M_INT_SCSI,
-/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
-/*6*/ SUN4M_INT_ETHERNET,
-/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
-/*8*/ SUN4M_INT_VIDEO,
-/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
-/*10*/ SUN4M_INT_REALTIME,
-/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
-/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
-/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
-/*14*/ SUN4M_INT_E14,
-/*15*/ SUN4M_INT_ERROR
-};
+static unsigned int sun4m_startup_irq(struct irq_data *data)
+{
+ irq_link(data->irq);
+ sun4m_unmask_irq(data);
+ return 0;
+}
-/* We assume the caller has disabled local interrupts when these are called,
- * or else very bizarre behavior will result.
- */
-static void sun4m_disable_pil_irq(unsigned int pil)
+static void sun4m_shutdown_irq(struct irq_data *data)
{
- sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_set);
+ sun4m_mask_irq(data);
+ irq_unlink(data->irq);
}
-static void sun4m_enable_pil_irq(unsigned int pil)
+static struct irq_chip sun4m_irq = {
+ .name = "sun4m",
+ .irq_startup = sun4m_startup_irq,
+ .irq_shutdown = sun4m_shutdown_irq,
+ .irq_mask = sun4m_mask_irq,
+ .irq_unmask = sun4m_unmask_irq,
+};
+
+
+static unsigned int sun4m_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
{
- sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_clear);
+ struct sun4m_handler_data *handler_data;
+ unsigned int irq;
+ unsigned int pil;
+
+ if (real_irq >= OBP_INT_LEVEL_VME) {
+ prom_printf("Bogus sun4m IRQ %u\n", real_irq);
+ prom_halt();
+ }
+ pil = (real_irq & 0xf);
+ irq = irq_alloc(real_irq, pil);
+
+ if (irq == 0)
+ goto out;
+
+ handler_data = irq_get_handler_data(irq);
+ if (unlikely(handler_data))
+ goto out;
+
+ handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
+ prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n");
+ prom_halt();
+ }
+
+ handler_data->mask = sun4m_imask[real_irq];
+ handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;
+ irq_set_chip_and_handler_name(irq, &sun4m_irq,
+ handle_level_irq, "level");
+ irq_set_handler_data(irq, handler_data);
+
+out:
+ return irq;
}
#ifdef CONFIG_SMP
static void sun4m_send_ipi(int cpu, int level)
{
- unsigned long mask = sun4m_get_irqmask(level);
- sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
+ sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_clear_ipi(int cpu, int level)
{
- unsigned long mask = sun4m_get_irqmask(level);
- sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
+ sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear);
}
static void sun4m_set_udt(int cpu)
@@ -314,7 +318,6 @@ struct sun4m_timer_global {
static struct sun4m_timer_global __iomem *timers_global;
-#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
@@ -350,7 +353,15 @@ void sun4m_nmi(struct pt_regs *regs)
prom_halt();
}
-/* Exported for sun4m_smp.c */
+void sun4m_unmask_profile_irq(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear);
+ local_irq_restore(flags);
+}
+
void sun4m_clear_profile_irq(int cpu)
{
sbus_readl(&timers_percpu[cpu]->l14_limit);
@@ -365,6 +376,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
{
struct device_node *dp = of_find_node_by_name(NULL, "counter");
int i, err, len, num_cpu_timers;
+ unsigned int irq;
const u32 *addr;
if (!dp) {
@@ -391,8 +403,9 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
master_l10_counter = &timers_global->l10_count;
- err = request_irq(TIMER_IRQ, counter_fn,
- (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
+ irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
+
+ err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
err);
@@ -407,7 +420,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
#ifdef CONFIG_SMP
{
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* For SMP we use the level 14 ticker, however the bootup code
@@ -460,13 +472,12 @@ void __init sun4m_init_IRQ(void)
if (num_cpu_iregs == 4)
sbus_writel(0, &sun4m_irq_global->interrupt_target);
- BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4m_init_timers;
+
+ sparc_irq_config.init_timers = sun4m_init_timers;
+ sparc_irq_config.build_device_irq = sun4m_build_device_irq;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
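
The mask-bit convention used by sun4m_mask_irq()/sun4m_unmask_irq() above is active-high: writing a source's bit to the set register (per-cpu set or global mask_set) blocks it, writing the same bit to clear (or mask_clear) re-enables it. A minimal sketch, assuming the register mappings and sun4m_imask[] table from this file; real code would also wrap the writes in local_irq_save/restore as the handlers above do:

/* Illustrative: unmask, then re-mask, the onboard ESP SCSI interrupt.
 * sun4m_imask[0x24] is SUN4M_INT_SCSI per the table above.
 */
static void example_toggle_scsi(void)
{
	unsigned long bit = sun4m_imask[OBP_INT_LEVEL_ONBOARD | 4];

	sbus_writel(bit, &sun4m_irq_global->mask_clear);	/* enable  */
	sbus_writel(bit, &sun4m_irq_global->mask_set);		/* disable */
}
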
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 762d6eedd944..594768686525 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -1,59 +1,25 @@
-/* sun4m_smp.c: Sparc SUN4M SMP support.
+/*
+ * sun4m SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/irq_regs.h>
-
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/cpudata.h>
#include "irq.h"
+#include "kernel.h"
+#define IRQ_IPI_SINGLE 12
+#define IRQ_IPI_MASK 13
+#define IRQ_IPI_RESCHED 14
#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
-
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned char boot_cpu_id;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4m_processor_id(void);
-
-/*#define SMP_DEBUG*/
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
-
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
@@ -63,8 +29,8 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
return val;
}
+static void smp4m_ipi_init(void);
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
void __cpuinit smp4m_callin(void)
{
@@ -96,8 +62,6 @@ void __cpuinit smp4m_callin(void)
/* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
-
- cpu_probe();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
@@ -108,7 +72,7 @@ void __cpuinit smp4m_callin(void)
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- while (!cpu_isset(cpuid, smp_commenced_mask))
+ while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
local_irq_enable();
@@ -119,18 +83,15 @@ void __cpuinit smp4m_callin(void)
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4m_boot_cpus(void)
{
+ smp4m_ipi_init();
smp_setup_percpu_timer();
local_flush_cache_all();
}
int __cpuinit smp4m_boot_one_cpu(int i)
{
- extern unsigned long sun4m_cpu_startup;
unsigned long *entry = &sun4m_cpu_startup;
struct task_struct *p;
int timeout;
@@ -142,7 +103,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
p = fork_idle(i);
current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
- entry += ((i-1) * 3);
+ entry += ((i - 1) * 3);
/*
* Initialize the contexts table
@@ -154,20 +115,19 @@ int __cpuinit smp4m_boot_one_cpu(int i)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
+ prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -193,17 +153,25 @@ void __init smp4m_smp_done(void)
/* Ok, they are spinning and ready to go. */
}
-/* At each hardware IRQ, we get this called to forward IRQ reception
- * to the next processor. The caller must disable the IRQ level being
- * serviced globally so that there are no double interrupts received.
- *
- * XXX See sparc64 irq.c.
- */
-void smp4m_irq_rotate(int cpu)
+
+/* Initialize IPIs on the SUN4M SMP machine */
+static void __init smp4m_ipi_init(void)
{
- int next = cpu_data(cpu).next;
- if (next != cpu)
- set_irq_udt(next);
+}
+
+static void smp4m_ipi_resched(int cpu)
+{
+ set_cpu_int(cpu, IRQ_IPI_RESCHED);
+}
+
+static void smp4m_ipi_single(int cpu)
+{
+ set_cpu_int(cpu, IRQ_IPI_SINGLE);
+}
+
+static void smp4m_ipi_mask_one(int cpu)
+{
+ set_cpu_int(cpu, IRQ_IPI_MASK);
}
static struct smp_funcall {
@@ -241,10 +209,10 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
register int i;
- cpu_clear(smp_processor_id(), mask);
- cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i < ncpus; i++) {
- if (cpu_isset(i, mask)) {
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+ cpumask_and(&mask, cpu_online_mask, &mask);
+ for (i = 0; i < ncpus; i++) {
+ if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
set_cpu_int(i, IRQ_CROSS_CALL);
@@ -260,21 +228,20 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
i = 0;
do {
- if (!cpu_isset(i, mask))
+ if (!cpumask_test_cpu(i, &mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
}
-
spin_unlock_irqrestore(&cross_call_lock, flags);
}
@@ -289,8 +256,6 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
-extern void sun4m_clear_profile_irq(int cpu);
-
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
@@ -302,7 +267,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -314,8 +279,6 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
@@ -323,17 +286,17 @@ static void __cpuinit smp_setup_percpu_timer(void)
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
- if(cpu == boot_cpu_id)
- enable_pil_irq(14);
+ if (cpu == boot_cpu_id)
+ sun4m_unmask_profile_irq();
}
static void __init smp4m_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
+ addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
}
@@ -341,9 +304,9 @@ static void __init smp4m_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
+ addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */
}
@@ -353,4 +316,7 @@ void __init sun4m_init_smp(void)
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
}
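
Each IPI type here is just a distinct soft-interrupt level (12, 13 and 14) delivered through set_cpu_int(), which the BTFIXUP wiring in sun4m_irq.c routes to sun4m_send_ipi(). A sketch of what smp4m_ipi_resched() therefore boils down to, assuming those definitions:

/* Illustrative expansion of smp4m_ipi_resched(cpu). */
static void example_resched_ipi(int cpu)
{
	/* IRQ_IPI_RESCHED == 14, so this raises soft interrupt 14
	 * (SUN4M_SOFT_INT(14)) in the target cpu's per-cpu set register.
	 */
	sbus_writel(SUN4M_SOFT_INT(IRQ_IPI_RESCHED),
		    &sun4m_irq_percpu[cpu]->set);
}
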
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index f836f4e93afe..96082d30def0 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -360,20 +360,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
}
EXPORT_SYMBOL(get_fb_unmapped_area);
-/* Essentially the same as PowerPC... */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+/* Essentially the same as PowerPC. */
+static unsigned long mmap_rnd(void)
{
- unsigned long random_factor = 0UL;
- unsigned long gap;
+ unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
+ unsigned long val = get_random_int();
if (test_thread_flag(TIF_32BIT))
- random_factor &= ((1 * 1024 * 1024) - 1);
+ rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
else
- random_factor = ((random_factor << PAGE_SHIFT) &
- 0xffffffffUL);
+ rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
}
+ return (rnd << PAGE_SHIFT) * 2;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = mmap_rnd();
+ unsigned long gap;
/*
* Fall back to the standard layout if the personality
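
The rewritten mmap_rnd() bounds the randomization by address-space size instead of a raw bit mask. Assuming sparc64's 8 KB pages (PAGE_SHIFT == 13), a 32-bit task draws from 2^(22-13) = 512 page offsets and a 64-bit task from 2^(29-13) = 65536, so after the final (rnd << PAGE_SHIFT) * 2 step the mmap base moves by up to roughly 8 MB and 1 GB respectively. The arithmetic, spelled out (illustrative only):

/* Illustrative: maximum span of the mmap randomization, PAGE_SHIFT == 13. */
static unsigned long example_mmap_rnd_span(int is_32bit)
{
	unsigned long max_pages = is_32bit ? (1UL << (22 - 13))   /* 512   */
					   : (1UL << (29 - 13));   /* 65536 */

	/* Mirrors (rnd << PAGE_SHIFT) * 2 for the largest rnd value. */
	return (max_pages - 1) << 13 << 1;	/* ~8 MB or ~1 GB */
}
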
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 1eb8b00aed75..7408201d7efb 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu,
unsigned long (*func)(unsigned long),
unsigned long arg)
{
- cpumask_t old_affinity = current->cpus_allowed;
+ cpumask_t old_affinity;
unsigned long ret;
+ cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
/* should return -EINVAL to userspace */
if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
return 0;
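
The sysfs change replaces the direct struct copy of current->cpus_allowed with cpumask_copy() of tsk_cpus_allowed(). The surrounding pattern is the usual save-pin-run-restore; a hedged sketch, noting that the restore step falls outside the hunk shown:

/* Illustrative save-pin-run-restore, in the style of run_on_cpu(). */
static unsigned long example_run_on_cpu(unsigned long cpu,
					unsigned long (*func)(unsigned long),
					unsigned long arg)
{
	cpumask_t old_affinity;
	unsigned long ret;

	cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
		return 0;			/* should return -EINVAL to userspace */

	ret = func(arg);

	set_cpus_allowed_ptr(current, &old_affinity);	/* restore */
	return ret;
}
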
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ec396e1916b9..6e492d59f6b1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -83,5 +83,5 @@ sys_call_table:
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .long sys_fanotify_mark, sys_prlimit64
-
+/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 8cfcaa549580..f566518483b5 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -84,7 +84,8 @@ sys_call_table32:
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/ .word sys32_fanotify_mark, sys_prlimit64
+/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+ .word sys_syncfs, compat_sys_sendmmsg, sys_setns
#endif /* CONFIG_COMPAT */
@@ -160,4 +161,5 @@ sys_call_table:
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .word sys_fanotify_mark, sys_prlimit64
+/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+ .word sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
deleted file mode 100644
index 138bbf5f8724..000000000000
--- a/arch/sparc/kernel/tick14.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* tick14.c
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- *
- * This file handles the Sparc specific level14 ticker
- * This is really useful for profiling OBP uses it for keyboard
- * aborts and other stuff.
- */
-#include <linux/kernel.h>
-
-extern unsigned long lvl14_save[5];
-static unsigned long *linux_lvl14 = NULL;
-static unsigned long obp_lvl14[4];
-
-/*
- * Call with timer IRQ closed.
- * First time we do it with disable_irq, later prom code uses spin_lock_irq().
- */
-void install_linux_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = lvl14_save[0];
- linux_lvl14[1] = lvl14_save[1];
- linux_lvl14[2] = lvl14_save[2];
- linux_lvl14[3] = lvl14_save[3];
-}
-
-void install_obp_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = obp_lvl14[0];
- linux_lvl14[1] = obp_lvl14[1];
- linux_lvl14[2] = obp_lvl14[2];
- linux_lvl14[3] = obp_lvl14[3];
-}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 9c743b1886ff..1060e0672a4b 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
#define TICK_SIZE (tick_nsec / 1000)
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
profile_tick(CPU_PROFILING);
#endif
- /* Protect counter clear so that do_gettimeoffset works */
- write_seqlock(&xtime_lock);
-
clear_clock_irq();
- do_timer(1);
-
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
@@ -142,7 +137,7 @@ static struct platform_device m48t59_rtc = {
},
};
-static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit clock_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
@@ -150,6 +145,10 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev
if (!model)
return -ENODEV;
+ /* Only the primary RTC has an address property */
+ if (!of_find_property(dp, "address", NULL))
+ return -ENODEV;
+
m48t59_rtc.resource = &op->resource[0];
if (!strcmp(model, "mk48t02")) {
/* Map the clock register io area read-only */
@@ -169,14 +168,14 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev
return 0;
}
-static struct of_device_id __initdata clock_match[] = {
+static struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
{},
};
-static struct of_platform_driver clock_driver = {
+static struct platform_driver clock_driver = {
.probe = clock_probe,
.driver = {
.name = "rtc",
@@ -189,7 +188,7 @@ static struct of_platform_driver clock_driver = {
/* Probe for the mostek real time clock chip. */
static int __init clock_init(void)
{
- return of_register_platform_driver(&clock_driver);
+ return platform_driver_register(&clock_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the RTC driver
@@ -224,19 +223,15 @@ static void __init sbus_time_init(void)
btfixup();
- sparc_init_timers(timer_interrupt);
+ sparc_irq_config.init_timers(timer_interrupt);
}
void __init time_init(void)
{
-#ifdef CONFIG_PCI
- extern void pci_time_init(void);
- if (pcic_present()) {
+ if (pcic_present())
pci_time_init();
- return;
- }
-#endif
- sbus_time_init();
+ else
+ sbus_time_init();
}
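
On the timer_interrupt() change above: the open-coded write_seqlock(&xtime_lock)/do_timer()/write_sequnlock() sequence is replaced by xtime_update(), which takes xtime_lock itself. For kernels of this vintage, xtime_update() amounts to roughly the following; this is a from-memory sketch of the generic timekeeping helper, not code quoted from this tree.

/* Approximate expansion of xtime_update(ticks), shown only to make the
 * conversion in timer_interrupt() above self-explanatory. */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&xtime_lock);
        do_timer(ticks);
        write_sequnlock(&xtime_lock);
}
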
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3bc9c9979b92..2b8d54b2d850 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = {
.num_resources = 1,
};
-static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit rtc_probe(struct platform_device *op)
{
struct resource *r;
@@ -442,7 +442,7 @@ static int __devinit rtc_probe(struct platform_device *op, const struct of_devic
return platform_device_register(&rtc_cmos_device);
}
-static struct of_device_id __initdata rtc_match[] = {
+static const struct of_device_id rtc_match[] = {
{
.name = "rtc",
.compatible = "m5819",
@@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = {
{},
};
-static struct of_platform_driver rtc_driver = {
+static struct platform_driver rtc_driver = {
.probe = rtc_probe,
.driver = {
.name = "rtc",
@@ -477,7 +477,7 @@ static struct platform_device rtc_bq4802_device = {
.num_resources = 1,
};
-static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit bq4802_probe(struct platform_device *op)
{
printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
@@ -487,7 +487,7 @@ static int __devinit bq4802_probe(struct platform_device *op, const struct of_de
return platform_device_register(&rtc_bq4802_device);
}
-static struct of_device_id __initdata bq4802_match[] = {
+static const struct of_device_id bq4802_match[] = {
{
.name = "rtc",
.compatible = "bq4802",
@@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = {
{},
};
-static struct of_platform_driver bq4802_driver = {
+static struct platform_driver bq4802_driver = {
.probe = bq4802_probe,
.driver = {
.name = "bq4802",
@@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = {
},
};
-static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit mostek_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -552,14 +552,14 @@ static int __devinit mostek_probe(struct platform_device *op, const struct of_de
return platform_device_register(&m48t59_rtc);
}
-static struct of_device_id __initdata mostek_match[] = {
+static const struct of_device_id mostek_match[] = {
{
.name = "eeprom",
},
{},
};
-static struct of_platform_driver mostek_driver = {
+static struct platform_driver mostek_driver = {
.probe = mostek_probe,
.driver = {
.name = "mostek",
@@ -586,9 +586,9 @@ static int __init clock_init(void)
if (tlb_type == hypervisor)
return platform_device_register(&rtc_sun4v_device);
- (void) of_register_platform_driver(&rtc_driver);
- (void) of_register_platform_driver(&mostek_driver);
- (void) of_register_platform_driver(&bq4802_driver);
+ (void) platform_driver_register(&rtc_driver);
+ (void) platform_driver_register(&mostek_driver);
+ (void) platform_driver_register(&bq4802_driver);
return 0;
}
@@ -816,14 +816,12 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
- clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
+ clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- clocksource_register(&clocksource_tick);
-
sparc64_clockevent.name = tick_ops->name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
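
The RTC hunks in time_32.c and time_64.c all follow the same of_platform_driver to platform_driver conversion: probe() loses the of_device_id argument and registration goes through platform_driver_register(). A minimal sketch of the resulting driver shape, using hypothetical foo_* names and assuming the match table stays attached via .driver.of_match_table as in mainline:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* probe() no longer receives the matched of_device_id entry */
static int __devinit foo_probe(struct platform_device *op)
{
        struct device_node *dp = op->dev.of_node;

        if (!of_get_property(dp, "model", NULL))
                return -ENODEV;
        return 0;
}

static const struct of_device_id foo_match[] = {
        { .name = "eeprom", },
        {},
};

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .driver = {
                .name           = "foo",
                .of_match_table = foo_match,
        },
};

static int __init foo_init(void)
{
        return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);

The same time_64.c diff also moves to clocksource_register_hz(), which derives mult/shift from the supplied frequency, so the separate clocksource_calc_mult_shift() call becomes unnecessary.
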
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1e9770936c3b..1ed547bd850f 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
- unsigned long fp, thread_base, ksp;
+ unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
flushw_all();
fp = ksp + STACK_BIAS;
- thread_base = (unsigned long) tp;
printk("Call Trace:\n");
do {
diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
index be183fe41443..1c8d33228b2a 100644
--- a/arch/sparc/kernel/una_asm_64.S
+++ b/arch/sparc/kernel/una_asm_64.S
@@ -127,7 +127,7 @@ do_int_load:
wr %o5, 0x0, %asi
retl
mov 0, %o0
- .size __do_int_load, .-__do_int_load
+ .size do_int_load, .-do_int_load
.section __ex_table,"a"
.word 4b, __retl_efault
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 8f982b76c712..531d54fc9829 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -237,7 +237,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
if (!cpu_online(cpu))
return 0;
- cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -258,7 +258,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
if (!cpu_online(cpu))
return;
- cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index f35d1e794548..9a8ceb700833 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -85,7 +85,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
if (!cpu_online(cpu))
return 0;
- cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
reg = read_safari_cfg();
@@ -105,7 +105,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
if (!cpu_online(cpu))
return;
- cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
new_freq = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e6783657f..c0220759003e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
__sun4v_2insn_patch_end = .;
}
- PERCPU(PAGE_SIZE)
+ PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;