Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/Makefile             1
-rw-r--r--  arch/tile/kernel/compat.c            11
-rw-r--r--  arch/tile/kernel/compat_signal.c      7
-rw-r--r--  arch/tile/kernel/early_printk.c       2
-rw-r--r--  arch/tile/kernel/hardwall.c           6
-rw-r--r--  arch/tile/kernel/intvec_32.S         24
-rw-r--r--  arch/tile/kernel/irq.c                4
-rw-r--r--  arch/tile/kernel/machine_kexec.c      6
-rw-r--r--  arch/tile/kernel/messaging.c          2
-rw-r--r--  arch/tile/kernel/pci.c              621
-rw-r--r--  arch/tile/kernel/process.c            8
-rw-r--r--  arch/tile/kernel/ptrace.c            39
-rw-r--r--  arch/tile/kernel/reboot.c             6
-rw-r--r--  arch/tile/kernel/setup.c             10
-rw-r--r--  arch/tile/kernel/signal.c            20
-rw-r--r--  arch/tile/kernel/smp.c                2
-rw-r--r--  arch/tile/kernel/smpboot.c            1
-rw-r--r--  arch/tile/kernel/sys.c                1
-rw-r--r--  arch/tile/kernel/time.c               8
19 files changed, 715 insertions(+), 64 deletions(-)
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 112b1e248f05..b4c8e8ec45dc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cdd9462..dbc213adf5e1 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -21,7 +21,6 @@
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
@@ -148,11 +147,11 @@ long tile_compat_sys_msgrcv(int msqid,
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
/* The native sys_ptrace dynamically handles compat binaries. */
#define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index fb64b99959d4..dbb0dfc7bece 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
@@ -291,12 +290,12 @@ long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
return ret;
}
+/* The assembly shim for this function arranges to ignore the return value. */
long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame =
(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
sigset_t set;
- long r0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -309,13 +308,13 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
goto badframe;
- return r0;
+ return 0;
badframe:
force_sig(SIGSEGV, current);
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a0..493a0e66d916 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
void early_panic(const char *fmt, ...)
{
va_list ap;
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
va_start(ap, fmt);
early_printk("Kernel panic - not syncing: ");
early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a7843410..e910530436e6 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
static void enable_firewall_interrupts(void)
{
- raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(INT_UDN_FIREWALL);
}
static void disable_firewall_interrupts(void)
{
- raw_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(INT_UDN_FIREWALL);
}
/* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
}
static const struct file_operations dev_hardwall_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = hardwall_compat_ioctl,
#endif
.flush = hardwall_flush,
.release = hardwall_release,
- .llseek = noop_llseek,
};
static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f5821626247f..5eed4a02bf62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1342,8 +1342,8 @@ handle_syscall:
lw r20, r20
/* Jump to syscall handler. */
- jalr r20; .Lhandle_syscall_link:
- FEEDBACK_REENTER(handle_syscall)
+ jalr r20
+.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
/*
* Write our r0 onto the stack so it gets restored instead
@@ -1352,6 +1352,9 @@ handle_syscall:
PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
sw r29, r0
+.Lsyscall_sigreturn_skip:
+ FEEDBACK_REENTER(handle_syscall)
+
/* Do syscall trace again, if requested. */
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
@@ -1536,9 +1539,24 @@ STD_ENTRY_LOCAL(bad_intr)
}; \
STD_ENDPROC(_##x)
+/*
+ * Special-case sigreturn to not write r0 to the stack on return.
+ * This is technically more efficient, but it also avoids difficulties
+ * in the 64-bit OS when handling 32-bit compat code, since we must not
+ * sign-extend r0 for the sigreturn return-value case.
+ */
+#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
+ STD_ENTRY(_##x); \
+ addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
+ { \
+ PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
+ j x \
+ }; \
+ STD_ENDPROC(_##x)
+
PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
-PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e63917687e99..128805ef8f2c 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
#define IS_HW_CLEARED 1
/*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
* This is initialized to have just a single interrupt that the kernel
* doesn't actually use as a sentinel. During kernel init,
* interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
/* Enable interrupt delivery. */
unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
- raw_local_irq_unmask(INT_IPI_K);
+ arch_local_irq_unmask(INT_IPI_K);
#endif
}
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d6179..0d8b9e933487 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
if ((entry & IND_SOURCE)) {
void *va =
- kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+ kmap_atomic_pfn(entry >> PAGE_SHIFT);
r = kexec_bn2cl(va);
if (r) {
command_line = r;
break;
}
- kunmap_atomic(va, KM_USER0);
+ kunmap_atomic(va);
}
}
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
hverr = hv_set_command_line(
(HV_VirtAddr) command_line, strlen(command_line));
- kunmap_atomic(command_line, KM_USER0);
+ kunmap_atomic(command_line);
} else {
pr_info("%s: no command line found; making empty\n",
__func__);
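
The kmap_atomic() changes above track the newer atomic-kmap API, which drops the explicit KM_USER0 slot argument and manages the slot implicitly. A minimal sketch of the new calling convention, assuming a configuration that provides kmap_atomic_pfn(); the pfn and offset values are hypothetical and not part of this patch:

        #include <linux/highmem.h>
        #include <linux/types.h>

        /* Sketch: temporarily map a page frame, read one byte, unmap. */
        static u8 peek_byte(unsigned long pfn, unsigned int offset)
        {
                void *va = kmap_atomic_pfn(pfn);   /* no KM_* slot argument */
                u8 val = *((u8 *)va + offset);

                kunmap_atomic(va);                 /* pass the mapped address back */
                return val;
        }
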
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e3933f726..0858ee6b520f 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
panic("hv_register_message_state: error %d", rc);
/* Make sure downcall interrupts will be enabled. */
- raw_local_irq_unmask(INT_INTCTRL_K);
+ arch_local_irq_unmask(INT_INTCTRL_K);
}
void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
new file mode 100644
index 000000000000..a1ee25be9ad9
--- /dev/null
+++ b/arch/tile/kernel/pci.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+#include <asm/hv_driver.h>
+#include <hv/drv_pcie_rc_intf.h>
+
+
+/*
+ * Initialization flow and process
+ * -------------------------------
+ *
+ * This file contains the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ *
+ * There are two entry points here:
+ * 1) tile_pci_init
+ * This sets up the pci_controller structs, and opens the
+ * FDs to the hypervisor. This is called from setup_arch() early
+ * in the boot process.
+ * 2) pcibios_init
+ * This probes the PCI bus(es) for any attached hardware. It's
+ * called by subsys_initcall. All of the real work is done by the
+ * generic Linux PCI layer.
+ *
+ */
+
+/*
+ * This flag tells whether the platform is a TILEmpower system, which
+ * needs special configuration for the PLX switch chip.
+ */
+int __write_once tile_plx_gen1;
+
+static struct pci_controller controllers[TILE_NUM_PCIE];
+static int num_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+/*
+ * Open a FD to the hypervisor PCI device.
+ *
+ * controller_id is the controller number; config_type is 0 or 1 for
+ * config0 or config1 operations.
+ */
+static int __init tile_pcie_open(int controller_id, int config_type)
+{
+ char filename[32];
+ int fd;
+
+ sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
+
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+
+ return fd;
+}
+
+
+/*
+ * Get the IRQ numbers from the HV and set up the handlers for them.
+ */
+static int __init tile_init_irqs(int controller_id,
+ struct pci_controller *controller)
+{
+ char filename[32];
+ int fd;
+ int ret;
+ int x;
+ struct pcie_rc_config rc_config;
+
+ sprintf(filename, "pcie/%d/ctl", controller_id);
+ fd = hv_dev_open((HV_VirtAddr)filename, 0);
+ if (fd < 0) {
+ pr_err("PCI: hv_dev_open(%s) failed\n", filename);
+ return -1;
+ }
+ ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
+ sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
+ hv_dev_close(fd);
+ if (ret != sizeof(rc_config)) {
+ pr_err("PCI: wanted %zd bytes, got %d\n",
+ sizeof(rc_config), ret);
+ return -1;
+ }
+ /* Record irq_base so that we can map INTx to IRQ # later. */
+ controller->irq_base = rc_config.intr;
+
+ for (x = 0; x < 4; x++)
+ tile_irq_activate(rc_config.intr + x,
+ TILE_IRQ_HW_CLEAR);
+
+ if (rc_config.plx_gen1)
+ controller->plx_gen1 = 1;
+
+ return 0;
+}
+
+/*
+ * First initialization entry point, called from setup_arch().
+ *
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+ int i;
+
+ pr_info("PCI: Searching for controllers...\n");
+
+ /* Do any configuration we need before using the PCIe */
+
+ for (i = 0; i < TILE_NUM_PCIE; i++) {
+ int hv_cfg_fd0 = -1;
+ int hv_cfg_fd1 = -1;
+ int hv_mem_fd = -1;
+ char name[32];
+ struct pci_controller *controller;
+
+ /*
+ * Open the fd to the HV. If it fails then this
+ * device doesn't exist.
+ */
+ hv_cfg_fd0 = tile_pcie_open(i, 0);
+ if (hv_cfg_fd0 < 0)
+ continue;
+ hv_cfg_fd1 = tile_pcie_open(i, 1);
+ if (hv_cfg_fd1 < 0) {
+ pr_err("PCI: Couldn't open config fd to HV "
+ "for controller %d\n", i);
+ goto err_cont;
+ }
+
+ sprintf(name, "pcie/%d/mem", i);
+ hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
+ if (hv_mem_fd < 0) {
+ pr_err("PCI: Could not open mem fd to HV!\n");
+ goto err_cont;
+ }
+
+ pr_info("PCI: Found PCI controller #%d\n", i);
+
+ controller = &controllers[num_controllers];
+
+ if (tile_init_irqs(i, controller)) {
+ pr_err("PCI: Could not initialize "
+ "IRQs, aborting.\n");
+ goto err_cont;
+ }
+
+ controller->index = num_controllers;
+ controller->hv_cfg_fd[0] = hv_cfg_fd0;
+ controller->hv_cfg_fd[1] = hv_cfg_fd1;
+ controller->hv_mem_fd = hv_mem_fd;
+ controller->first_busno = 0;
+ controller->last_busno = 0xff;
+ controller->ops = &tile_cfg_ops;
+
+ num_controllers++;
+ continue;
+
+err_cont:
+ if (hv_cfg_fd0 >= 0)
+ hv_dev_close(hv_cfg_fd0);
+ if (hv_cfg_fd1 >= 0)
+ hv_dev_close(hv_cfg_fd1);
+ if (hv_mem_fd >= 0)
+ hv_dev_close(hv_mem_fd);
+ continue;
+ }
+
+ /*
+ * Before using the PCIe, see if we need to do any platform-specific
+ * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
+ */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+
+ if (controller->plx_gen1)
+ tile_plx_gen1 = 1;
+ }
+
+ return num_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
+ */
+static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_controller *controller =
+ (struct pci_controller *)dev->sysdata;
+ return (pin - 1) + controller->irq_base;
+}
+
+
+static void __init fixup_read_and_payload_sizes(void)
+{
+ struct pci_dev *dev = NULL;
+ int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
+ int max_read_size = 0x2; /* Limit to 512 byte reads. */
+ u16 new_values;
+
+ /* Scan for the smallest maximum payload size. */
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u32 devcap;
+ int max_payload;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
+ &devcap);
+ max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
+ if (max_payload < smallest_max_payload)
+ smallest_max_payload = max_payload;
+ }
+
+ /* Now, set the max_payload_size for all devices to that value. */
+ new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ int pcie_caps_offset;
+ u16 devctl;
+
+ pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pcie_caps_offset == 0)
+ continue;
+
+ pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ &devctl);
+ devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
+ devctl |= new_values;
+ pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+ devctl);
+ }
+}
+
+
+/*
+ * Second PCI initialization entry point, called by subsys_initcall.
+ *
+ * The controllers have been set up by the time we get here, by a call to
+ * tile_pci_init.
+ */
+static int __init pcibios_init(void)
+{
+ int i;
+
+ pr_info("PCI: Probing PCI hardware\n");
+
+ /*
+ * Delay a bit in case devices aren't ready. Some devices are
+ * known to require at least 20ms here, but we use a more
+ * conservative value.
+ */
+ mdelay(250);
+
+ /* Scan all of the recorded PCI controllers. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_controller *controller = &controllers[i];
+ struct pci_bus *bus;
+
+ pr_info("PCI: initializing controller #%d\n", i);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It reads the PCI tree for this bus into the Linux
+ * data structures.
+ *
+ * This is inlined in linux/pci.h and calls into
+ * pci_scan_bus_parented() in probe.c.
+ */
+ bus = pci_scan_bus(0, controller->ops, controller);
+ controller->root_bus = bus;
+ controller->last_busno = bus->subordinate;
+
+ }
+
+ /* Do machine dependent PCI interrupt routing */
+ pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It allocates all of the resources (I/O memory, etc)
+ * associated with the devices read in above.
+ */
+
+ pci_assign_unassigned_resources();
+
+ /* Configure the max_read_size and max_payload_size values. */
+ fixup_read_and_payload_sizes();
+
+ /* Record the I/O resources in the PCI controller structure. */
+ for (i = 0; i < num_controllers; i++) {
+ struct pci_bus *root_bus = controllers[i].root_bus;
+ struct pci_bus *next_bus;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &root_bus->devices, bus_list) {
+ /* Find the PCI host controller, ie. the 1st bridge. */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+ (PCI_SLOT(dev->devfn) == 0)) {
+ next_bus = dev->subordinate;
+ controllers[i].mem_resources[0] =
+ *next_bus->resource[0];
+ controllers[i].mem_resources[1] =
+ *next_bus->resource[1];
+ controllers[i].mem_resources[2] =
+ *next_bus->resource[2];
+
+ break;
+ }
+ }
+
+ }
+
+ return 0;
+}
+subsys_initcall(pcibios_init);
+
+/*
+ * No bus fixups needed.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* Nothing needs to be done. */
+}
+
+/*
+ * This can be called from the generic PCI layer, but doesn't need to
+ * do anything.
+ */
+char __devinit *pcibios_setup(char *str)
+{
+ /* Nothing needs to be done. */
+ return str;
+}
+
+/*
+ * This is called from the generic Linux layer.
+ */
+void __init pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * Enable memory and/or address decoding, as appropriate, for the
+ * device described by the 'dev' struct.
+ *
+ * This is called from the generic PCI layer, and can be called
+ * for bridges or endpoints.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ u8 header_type;
+ int i;
+ struct resource *r;
+
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * For bridges, we enable both memory and I/O decoding
+ * in all cases.
+ */
+ cmd |= PCI_COMMAND_IO;
+ cmd |= PCI_COMMAND_MEMORY;
+ } else {
+ /*
+ * For endpoints, we enable memory and/or I/O decoding
+ * only if they have a memory resource of that type.
+ */
+ for (i = 0; i < 6; i++) {
+ r = &dev->resource[i];
+ if (r->flags & IORESOURCE_UNSET) {
+ pr_err("PCI: Device %s not available "
+ "because of resource collisions\n",
+ pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ }
+
+ /*
+ * We only write the command if it changed.
+ */
+ if (cmd != old_cmd)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len)
+ return NULL;
+ if (max && len > max)
+ len = max;
+
+ if (!(flags & IORESOURCE_MEM)) {
+ pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
+ start = 0;
+ }
+
+ return (void __iomem *)start;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+
+/****************************************************************
+ *
+ * Tile PCI config space read/write routines
+ *
+ ****************************************************************/
+
+/*
+ * These are the normal read and write ops; they are invoked via the
+ * macro-generated pci_bus_read_config_byte() etc. wrappers.
+ *
+ * devfn is the combined PCI slot & function.
+ *
+ * offset is in bytes, from the start of config space for the
+ * specified bus & slot.
+ */
+
+static int __devinit tile_cfg_read(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 *val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+
+ /*
+ * There is no bridge between the Tile and bus 0, so we
+ * use config0 to talk to bus 0.
+ *
+ * If we're talking to a bus other than zero then we
+ * must have found a bridge.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot) {
+ *val = 0xFFFFFFFF;
+ return 0;
+ }
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
+
+ return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
+ (HV_VirtAddr)(val), size, addr);
+}
+
+
+/*
+ * See tile_cfg_read() for relevant comments.
+ * Note that "val" is the value to write, not a pointer to that value.
+ */
+static int __devinit tile_cfg_write(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ int busnum = bus->number & 0xff;
+ int slot = (devfn >> 3) & 0x1f;
+ int function = devfn & 0x7;
+ u32 addr;
+ int config_mode = 1;
+ HV_VirtAddr valp = (HV_VirtAddr)&val;
+
+ /*
+ * For bus 0 slot 0 we use config 0 accesses.
+ */
+ if (busnum == 0) {
+ /*
+ * We fake an empty slot for (busnum == 0) && (slot > 0),
+ * since there is only one slot on bus 0.
+ */
+ if (slot)
+ return 0;
+ config_mode = 0;
+ }
+
+ addr = busnum << 20; /* Bus in 27:20 */
+ addr |= slot << 15; /* Slot (device) in 19:15 */
+ addr |= function << 12; /* Function is in 14:12 */
+ addr |= (offset & 0xFFF); /* byte address in 0:11 */
+
+#ifdef __BIG_ENDIAN
+ /* Point to the correct part of the 32-bit "val". */
+ valp += 4 - size;
+#endif
+
+ return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
+ valp, size, addr);
+}
+
+
+static struct pci_ops tile_cfg_ops = {
+ .read = tile_cfg_read,
+ .write = tile_cfg_write,
+};
+
+
+/*
+ * In the following, each PCI controller's mem_resources[1]
+ * represents its (non-prefetchable) PCI memory resource.
+ * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
+ * prefetchable PCI memory resources, respectively.
+ * For more details, see pci_setup_bridge() in setup-bus.c.
+ * By comparing the target PCI memory address against the
+ * end address of controller 0, we can determine the controller
+ * that should accept the PCI memory access.
+ */
+#define TILE_READ(size, type) \
+type _tile_read##size(unsigned long addr) \
+{ \
+ type val; \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+ return val; \
+} \
+EXPORT_SYMBOL(_tile_read##size)
+
+TILE_READ(b, u8);
+TILE_READ(w, u16);
+TILE_READ(l, u32);
+TILE_READ(q, u64);
+
+#define TILE_WRITE(size, type) \
+void _tile_write##size(type val, unsigned long addr) \
+{ \
+ int idx = 0; \
+ if (addr > controllers[0].mem_resources[1].end && \
+ addr > controllers[0].mem_resources[2].end) \
+ idx = 1; \
+ if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
+ (HV_VirtAddr)(&val), sizeof(type), addr)) \
+ pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
+ sizeof(type), addr); \
+} \
+EXPORT_SYMBOL(_tile_write##size)
+
+TILE_WRITE(b, u8);
+TILE_WRITE(w, u16);
+TILE_WRITE(l, u32);
+TILE_WRITE(q, u64);
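
For reference, tile_cfg_read() and tile_cfg_write() above pack the target into a single hypervisor address: bus in bits 27:20, device (slot) in bits 19:15, function in bits 14:12, and the byte offset in bits 11:0. A stand-alone sketch of that packing, using made-up example values that are not part of the patch:

        #include <stdio.h>
        #include <stdint.h>

        /* Sketch of the addr packing used by tile_cfg_read()/tile_cfg_write(). */
        static uint32_t tile_cfg_addr(int bus, int dev, int fn, int offset)
        {
                uint32_t addr;

                addr  = (uint32_t)bus << 20;    /* bus in bits 27:20 */
                addr |= (uint32_t)dev << 15;    /* device (slot) in bits 19:15 */
                addr |= (uint32_t)fn  << 12;    /* function in bits 14:12 */
                addr |= offset & 0xFFF;         /* byte offset in bits 11:0 */
                return addr;
        }

        int main(void)
        {
                /* e.g. bus 1, device 2, function 0, offset 0x10 (BAR0) */
                printf("0x%08x\n", (unsigned int)tile_cfg_addr(1, 2, 0, 0x10));
                return 0;       /* prints 0x00110010 */
        }
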
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 8430f45daea6..e90eb53173b0 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -212,6 +212,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs->sp = sp; /* override with new user stack pointer */
/*
+ * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
+ * which is passed in as arg #5 to sys_clone().
+ */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->tp = regs->regs[4];
+
+ /*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
* This way when we start the interrupt-return sequence, the
@@ -539,6 +546,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
return __switch_to(prev, next, next_current_ksp0(next));
}
+/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
void __user *, parent_tidptr, void __user *, child_tidptr,
struct pt_regs *, regs)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd29884c09f..e92e40527d6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@ long arch_ptrace(struct task_struct *child, long request,
{
unsigned long __user *datap = (long __user __force *)data;
unsigned long tmp;
- int i;
long ret = -EIO;
- unsigned long *childregs;
char *childreg;
+ struct pt_regs copyregs;
+ int ex1_offset;
switch (request) {
@@ -80,6 +80,16 @@ long arch_ptrace(struct task_struct *child, long request,
if (addr >= PTREGS_SIZE)
break;
childreg = (char *)task_pt_regs(child) + addr;
+
+ /* Guard against overwrites of the privilege level. */
+ ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+ if (is_compat_task()) /* point at low word */
+ ex1_offset += sizeof(compat_long_t);
+#endif
+ if (addr == ex1_offset)
+ data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
- if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
- break;
- childregs = (long *)task_pt_regs(child);
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
- ++i) {
- ret = __put_user(childregs[i], &datap[i]);
- if (ret != 0)
- break;
+ if (copy_to_user(datap, task_pt_regs(child),
+ sizeof(struct pt_regs)) == 0) {
+ ret = 0;
}
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
- if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
- break;
- childregs = (long *)task_pt_regs(child);
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
- ++i) {
- ret = __get_user(childregs[i], &datap[i]);
- if (ret != 0)
- break;
+ if (copy_from_user(&copyregs, datap,
+ sizeof(struct pt_regs)) == 0) {
+ copyregs.ex1 =
+ PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+ *task_pt_regs(child) = copyregs;
+ ret = 0;
}
break;
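
The ptrace changes above keep a tracer from raising the traced task's protection level: both the PEEKUSR/POKEUSR path and PTRACE_SETREGS rewrite ex1 through PL_ICS_EX1(USER_PL, ...) before it reaches the child. A rough userspace sketch of the SETREGS round trip, assuming the tile pt_regs layout from asm/ptrace.h and omitting error handling:

        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <sys/wait.h>
        #include <asm/ptrace.h>         /* struct pt_regs for the tile arch */

        /* Sketch: read a stopped tracee's registers, tweak one, write back.
         * Even if ex1 were altered here, the kernel now forces its PL field
         * back to USER_PL before accepting the new register set. */
        static void poke_regs(pid_t child)
        {
                struct pt_regs regs;

                waitpid(child, NULL, 0);                    /* tracee is stopped */
                ptrace(PTRACE_GETREGS, child, NULL, &regs);
                regs.regs[0] = 0;                           /* e.g. zero out r0 */
                ptrace(PTRACE_SETREGS, child, NULL, &regs);
                ptrace(PTRACE_CONT, child, NULL, NULL);
        }
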
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beba..baa3d905fee2 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
void machine_halt(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_halt();
}
@@ -35,14 +35,14 @@ void machine_halt(void)
void machine_power_off(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_power_off();
}
void machine_restart(char *cmd)
{
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad12da0..f18573643ed1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -840,7 +840,7 @@ static int __init topology_init(void)
for_each_online_node(i)
register_one_node(i);
- for_each_present_cpu(i)
+ for (i = 0; i < smp_height * smp_width; ++i)
register_cpu(&cpu_devices[i], i);
return 0;
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
- raw_local_irq_unmask(INT_DMATLB_MISS);
- raw_local_irq_unmask(INT_DMATLB_ACCESS);
+ arch_local_irq_unmask(INT_DMATLB_MISS);
+ arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
- raw_local_irq_unmask(INT_SNITLB_MISS);
+ arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
- raw_local_irq_unmask(INT_SINGLE_STEP_K);
+ arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif
/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85ae3ae..1260321155f1 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
@@ -53,7 +52,7 @@ SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
*/
int restore_sigcontext(struct pt_regs *regs,
- struct sigcontext __user *sc, long *pr0)
+ struct sigcontext __user *sc)
{
int err = 0;
int i;
@@ -71,19 +70,20 @@ int restore_sigcontext(struct pt_regs *regs,
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ /* Ensure that the PL is always set to USER_PL. */
+ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
regs->faultnum = INT_SWINT_1_SIGRETURN;
- err |= __get_user(*pr0, &sc->gregs[0]);
return err;
}
-/* sigreturn() returns long since it restores r0 in the interrupted code. */
+/* The assembly shim for this function arranges to ignore the return value. */
SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->sp);
sigset_t set;
- long r0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -96,13 +96,13 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
goto badframe;
- return r0;
+ return 0;
badframe:
force_sig(SIGSEGV, current);
@@ -330,7 +330,7 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
- return;
+ goto done;
}
/* Did we come from a system call? */
@@ -358,4 +358,8 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
+
+done:
+ /* Avoid double syscall restart if there are nested signals. */
+ regs->faultnum = INT_SWINT_1_SIGRETURN;
}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d90aff3..9575b37a8b75 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
static void smp_stop_cpu_interrupt(void)
{
set_cpu_online(smp_processor_id(), 0);
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
for (;;)
asm("nap");
}
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 74d62d098edf..b949edcec200 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 7e764669a022..e2187d24a9b4 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e1421..f2e156e44692 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
{
BUG_ON(ticks > MAX_TICK);
__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
- raw_local_irq_unmask_now(INT_TILE_TIMER);
+ arch_local_irq_unmask_now(INT_TILE_TIMER);
return 0;
}
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
static void tile_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
}
/*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
evt->cpumask = cpumask_of(smp_processor_id());
/* Start out with timer not firing. */
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
/* Register tile timer. */
clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
* Mask the timer interrupt here, since we are a oneshot timer
* and there are now by definition no events pending.
*/
- raw_local_irq_mask(INT_TILE_TIMER);
+ arch_local_irq_mask(INT_TILE_TIMER);
/* Track time spent here in an interrupt context */
irq_enter();