-rw-r--r--  arch/arm/include/asm/kexec.h              5
-rw-r--r--  arch/arm/include/asm/ucontext.h           6
-rw-r--r--  arch/arm/include/debug/omap2plus.S        1
-rw-r--r--  arch/arm/kernel/entry-armv.S              2
-rw-r--r--  arch/arm/kernel/head.S                    2
-rw-r--r--  arch/arm/kernel/hyp-stub.S                1
-rw-r--r--  arch/arm/kernel/iwmmxt.S                  1
-rw-r--r--  arch/arm/kernel/machine_kexec.c          11
-rw-r--r--  arch/arm/kernel/setup.c                   3
-rw-r--r--  arch/arm/kernel/signal.c                 78
-rw-r--r--  arch/arm/kernel/sleep.S                   1
-rw-r--r--  arch/arm/mach-exynos/sleep.S              1
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S           2
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S           1
-rw-r--r--  arch/arm/mach-pxa/mioa701_bootresume.S    2
-rw-r--r--  arch/arm/mach-rockchip/sleep.S            2
-rw-r--r--  arch/arm/mm/cache-v4wb.S                  1
-rw-r--r--  arch/arm/mm/fault.c                       5
-rw-r--r--  arch/arm/mm/proc-v7-3level.S              3
-rw-r--r--  arch/arm/mm/proc-xscale.S                 1
20 files changed, 102 insertions, 27 deletions
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 1869af6bac5c..25021b798a1e 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -19,6 +19,11 @@
#ifndef __ASSEMBLY__
+#define ARCH_HAS_KIMAGE_ARCH
+struct kimage_arch {
+ u32 kernel_r2;
+};
+
/**
* crash_setup_regs() - save registers for the panic kernel
* @newregs: registers are saved here
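The new kernel_r2 field rides along with each loaded kexec image. As a simplified sketch (the real definition lives in include/linux/kexec.h; the elided fields are an assumption of this sketch), defining ARCH_HAS_KIMAGE_ARCH makes the generic struct kimage embed the arch-specific block, which is what lets machine_kexec_prepare() store a per-image boot r2 value instead of a file-scope global:

#include <linux/types.h>

#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
	u32 kernel_r2;			/* value handed to the new kernel in r2 */
};

struct kimage {
	/* ... generic segment and page-list fields elided ... */
#ifdef ARCH_HAS_KIMAGE_ARCH
	struct kimage_arch arch;	/* per-image arch state, as added above */
#endif
};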
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 14749aec94bf..921d8274855c 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -35,6 +35,12 @@ struct ucontext {
* bytes, to prevent unpredictable padding in the signal frame.
*/
+/*
+ * Dummy padding block: if this magic is encountered, the block should
+ * be skipped using the corresponding size field.
+ */
+#define DUMMY_MAGIC 0xb0d9ed01
+
#ifdef CONFIG_CRUNCH
#define CRUNCH_MAGIC 0x5065cf03
#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
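DUMMY_MAGIC is part of the signal-frame ABI: userspace that parses uc_regspace should treat any block with an unknown magic as opaque and step over it using its size field. An illustrative userspace-side sketch of that walk (the aux_block_header and find_aux_block names are hypothetical; each real coprocessor frame starts with the same { magic, size } pair, and the kernel terminates the area with a zero magic):

struct aux_block_header {
	unsigned long magic;
	unsigned long size;		/* total size of this block, in bytes */
};

static const void *find_aux_block(const unsigned long *uc_regspace,
				  unsigned long wanted_magic)
{
	const char *p = (const char *)uc_regspace;

	for (;;) {
		const struct aux_block_header *h =
			(const struct aux_block_header *)p;

		if (h->magic == 0)		/* end marker */
			return NULL;
		if (h->magic == wanted_magic)
			return h;
		p += h->size;			/* skip DUMMY_MAGIC and unknown blocks */
	}
}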
diff --git a/arch/arm/include/debug/omap2plus.S b/arch/arm/include/debug/omap2plus.S
index 6d867aef18eb..78e7e58a58f6 100644
--- a/arch/arm/include/debug/omap2plus.S
+++ b/arch/arm/include/debug/omap2plus.S
@@ -59,6 +59,7 @@
#define UART_OFFSET(addr) ((addr) & 0x00ffffff)
.pushsection .data
+ .align 2
omap_uart_phys: .word 0
omap_uart_virt: .word 0
omap_uart_lsr: .word 0
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c731f0d2b2af..fbc707626b3e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -721,6 +721,7 @@ do_fpe:
*/
.pushsection .data
+ .align 2
ENTRY(fp_enter)
.word no_fp
.popsection
@@ -1224,6 +1225,7 @@ vector_addrexcptn:
W(b) vector_fiq
.data
+ .align 2
.globl cr_alignment
cr_alignment:
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04286fd9e09c..6b1148cafffd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -556,6 +556,7 @@ ENDPROC(__fixup_smp)
.word __smpalt_end
.pushsection .data
+ .align 2
.globl smp_on_up
smp_on_up:
ALT_SMP(.long 1)
@@ -716,6 +717,7 @@ ENTRY(fixup_pv_table)
ENDPROC(fixup_pv_table)
.data
+ .align 2
.globl __pv_phys_pfn_offset
.type __pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index ec7e7377d423..60146e32619a 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -31,6 +31,7 @@
* zeroing of .bss would clobber it.
*/
.data
+ .align 2
ENTRY(__boot_cpu_mode)
.long 0
.text
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 49fadbda8c63..81cd4d43b3ec 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -367,6 +367,7 @@ ENTRY(iwmmxt_task_release)
ENDPROC(iwmmxt_task_release)
.data
+ .align 2
concan_owner:
.word 0
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 15495887ca14..fe1419eeb932 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -30,7 +30,6 @@ extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
-static unsigned long dt_mem;
/*
* Provide a dummy crash_notes definition while crash dump arrives to arm.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -42,6 +41,9 @@ int machine_kexec_prepare(struct kimage *image)
__be32 header;
int i, err;
+ image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
+ + KEXEC_ARM_ATAGS_OFFSET;
+
/*
* Validate that if the current HW supports SMP, then the SW supports
* and implements CPU hotplug for the current HW. If not, we won't be
@@ -66,8 +68,8 @@ int machine_kexec_prepare(struct kimage *image)
if (err)
return err;
- if (be32_to_cpu(header) == OF_DT_HEADER)
- dt_mem = current_segment->mem;
+ if (header == cpu_to_be32(OF_DT_HEADER))
+ image->arch.kernel_r2 = current_segment->mem;
}
return 0;
}
@@ -165,8 +167,7 @@ void machine_kexec(struct kimage *image)
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
- kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
- + KEXEC_ARM_ATAGS_OFFSET;
+ kexec_boot_atags = image->arch.kernel_r2;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
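With dt_mem gone, the fall-back value stored in image->arch.kernel_r2 is the conventional ATAGS address below the zImage load address. A small worked sketch (the offsets are taken from asm/kexec.h, 0x8000 and 0x1000, and the load address is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define KEXEC_ARM_ZIMAGE_OFFSET	0x8000
#define KEXEC_ARM_ATAGS_OFFSET	0x1000

int main(void)
{
	uint32_t image_start = 0x80008000;	/* hypothetical zImage load address */
	uint32_t kernel_r2 = image_start - KEXEC_ARM_ZIMAGE_OFFSET
				+ KEXEC_ARM_ATAGS_OFFSET;

	/* prints 0x80001000; machine_kexec_prepare() overrides this with the
	 * DTB segment address if one was loaded */
	printf("default kernel_r2 = 0x%08x\n", (unsigned int)kernel_r2);
	return 0;
}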
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 4e80bf7420d4..8e9a3e40d949 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -987,6 +987,9 @@ static void __init reserve_crashkernel(void)
if (crash_base <= 0) {
unsigned long long crash_max = idmap_to_phys((u32)~0);
+ unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
+ if (crash_max > lowmem_max)
+ crash_max = lowmem_max;
crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
crash_size, CRASH_ALIGN);
if (!crash_base) {
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f2141427b..5814298ef0b7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -40,8 +40,10 @@ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
return __copy_to_user(frame, kframe, sizeof(*frame));
}
-static int restore_crunch_context(struct crunch_sigframe __user *frame)
+static int restore_crunch_context(char __user **auxp)
{
+ struct crunch_sigframe __user *frame =
+ (struct crunch_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct crunch_sigframe *kframe;
@@ -52,6 +54,7 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
if (kframe->magic != CRUNCH_MAGIC ||
kframe->size != CRUNCH_STORAGE_SIZE)
return -1;
+ *auxp += CRUNCH_STORAGE_SIZE;
crunch_task_restore(current_thread_info(), &kframe->storage);
return 0;
}
@@ -59,21 +62,39 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
#ifdef CONFIG_IWMMXT
-static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
+ int err = 0;
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic = IWMMXT_MAGIC;
- kframe->size = IWMMXT_STORAGE_SIZE;
- iwmmxt_task_copy(current_thread_info(), &kframe->storage);
- return __copy_to_user(frame, kframe, sizeof(*frame));
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
+ iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+
+ err = __copy_to_user(frame, kframe, sizeof(*frame));
+ } else {
+ /*
+ * For bug-compatibility with older kernels, some space
+ * has to be reserved for iWMMXt even if it's not used.
+ * Set the magic and size appropriately so that properly
+ * written userspace can skip it reliably:
+ */
+ __put_user_error(DUMMY_MAGIC, &frame->magic, err);
+ __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ }
+
+ return err;
}
-static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int restore_iwmmxt_context(char __user **auxp)
{
+ struct iwmmxt_sigframe __user *frame =
+ (struct iwmmxt_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
@@ -81,10 +102,28 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
- if (kframe->magic != IWMMXT_MAGIC ||
- kframe->size != IWMMXT_STORAGE_SIZE)
+
+ /*
+ * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
+ * block is discarded for compatibility with setup_sigframe() if
+ * present, but we don't mandate its presence. If some other
+ * magic is here, it's not for us:
+ */
+ if (!test_thread_flag(TIF_USING_IWMMXT) &&
+ kframe->magic != DUMMY_MAGIC)
+ return 0;
+
+ if (kframe->size != IWMMXT_STORAGE_SIZE)
return -1;
- iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ if (kframe->magic != IWMMXT_MAGIC)
+ return -1;
+
+ iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+ }
+
+ *auxp += IWMMXT_STORAGE_SIZE;
return 0;
}
@@ -107,8 +146,10 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}
-static int restore_vfp_context(struct vfp_sigframe __user *frame)
+static int restore_vfp_context(char __user **auxp)
{
+ struct vfp_sigframe __user *frame =
+ (struct vfp_sigframe __user *)*auxp;
unsigned long magic;
unsigned long size;
int err = 0;
@@ -121,6 +162,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
+ *auxp += size;
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
@@ -141,7 +183,7 @@ struct rt_sigframe {
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
- struct aux_sigframe __user *aux;
+ char __user *aux;
sigset_t set;
int err;
@@ -169,18 +211,18 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= !valid_user_regs(regs);
- aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+ aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
if (err == 0)
- err |= restore_crunch_context(&aux->crunch);
+ err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
- err |= restore_iwmmxt_context(&aux->iwmmxt);
+ if (err == 0)
+ err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
if (err == 0)
- err |= restore_vfp_context(&aux->vfp);
+ err |= restore_vfp_context(&aux);
#endif
return err;
@@ -286,7 +328,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+ if (err == 0)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
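Taken together, the restore_*_context() helpers now share one convention: each takes a cursor into the aux area, validates its own block, and advances the cursor by that block's storage size, so optional blocks (and the dummy iWMMXt block reserved above) no longer pin later blocks to fixed offsets. A self-contained sketch of the pattern, with hypothetical example_* names standing in for the real Crunch/iWMMXt/VFP frames:

struct example_sigframe {
	unsigned long magic;
	unsigned long size;
	unsigned long storage[16];
};

#define EXAMPLE_MAGIC		0x45780001	/* hypothetical */
#define EXAMPLE_STORAGE_SIZE	sizeof(struct example_sigframe)

static int restore_example_context(char **auxp)
{
	struct example_sigframe *frame = (struct example_sigframe *)*auxp;

	if (frame->magic != EXAMPLE_MAGIC ||
	    frame->size != EXAMPLE_STORAGE_SIZE)
		return -1;

	/* ... copy frame->storage into the thread's coprocessor state ... */

	*auxp += EXAMPLE_STORAGE_SIZE;	/* cursor moves past this block */
	return 0;
}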
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 0f6c1000582c..9f08d214d05a 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -171,6 +171,7 @@ mpidr_hash_ptr:
.long mpidr_hash - . @ mpidr_hash struct offset
.data
+ .align 2
.type sleep_save_sp, #object
ENTRY(sleep_save_sp)
.space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index cf950790fbdc..4292cae43f3c 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -124,6 +124,7 @@ _cp15_save_diag:
#endif /* CONFIG_CACHE_L2X0 */
.data
+ .align 2
.globl cp15_save_diag
cp15_save_diag:
.long 0 @ cp15 diagnostic
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 1b9f0520dea9..fa5fd24f524c 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -530,10 +530,12 @@ l2dis_3630_offset:
.long l2dis_3630 - .
.data
+ .align 2
l2dis_3630:
.word 0
.data
+ .align 2
l2_inv_api_params:
.word 0x1, 0x00
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index c7a3b4aab4b5..56dfa2d5d0a8 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -385,6 +385,7 @@ ppa_zero_params_offset:
ENDPROC(omap_do_wfi)
.data
+ .align 2
ppa_zero_params:
.word 0
diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S
index 81591491ab94..42d93f40a59f 100644
--- a/arch/arm/mach-pxa/mioa701_bootresume.S
+++ b/arch/arm/mach-pxa/mioa701_bootresume.S
@@ -16,6 +16,7 @@
* insist on it to be truly read-only.
*/
.data
+ .align 2
ENTRY(mioa701_bootstrap)
0:
b 1f
@@ -34,4 +35,5 @@ ENTRY(mioa701_jumpaddr)
ENTRY(mioa701_bootstrap_lg)
.data
+ .align 2
.word 2b-0b
diff --git a/arch/arm/mach-rockchip/sleep.S b/arch/arm/mach-rockchip/sleep.S
index 2eec9a341f05..9927f06f52fe 100644
--- a/arch/arm/mach-rockchip/sleep.S
+++ b/arch/arm/mach-rockchip/sleep.S
@@ -23,7 +23,7 @@
* ddr to sram for system resuming.
* so it is ".data section".
*/
-.align
+ .align 2
ENTRY(rockchip_slp_cpu_resume)
setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set svc, irqs off
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2522f8c8fbb1..a5084ec70c6e 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -47,6 +47,7 @@
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
.data
+ .align 2
flush_base:
.long FLUSH_BASE
.text
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ff8b0aa2dfde..42f585379e19 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -315,8 +315,11 @@ retry:
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 5e5720e8bc5f..7d16bbc4102b 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -129,8 +129,7 @@ ENDPROC(cpu_v7_set_pte_ext)
.macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET?
- mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
- orr \tmp, \tmp, #TTB_EAE
+ mov \tmp, #TTB_EAE @ for TTB control register
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index b6bbfdb6dfdc..3d75b7972fd1 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -104,6 +104,7 @@
.endm
.data
+ .align 2
clean_addr: .word CLEAN_ADDR
.text