Diffstat (limited to 'arch/parisc')
 -rw-r--r--  arch/parisc/include/asm/ldcw.h | 13 ++++++++++---
 -rw-r--r--  arch/parisc/kernel/module.c    |  6 +-----
 -rw-r--r--  arch/parisc/mm/fault.c         |  2 ++
 3 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+ fails to find a register in class R1_REGS when "a" needs to be
+ reloaded when generating 64-bit PIC code. Instead, we clobber
+ memory to indicate to the compiler that the assembly code reads
+ or writes to items other than those listed in the input and output
+ operands. This may pessimize the code somewhat but __ldcw is
+ usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
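
For context, __ldcw() atomically loads the lock word at *a, stores zero to it, and returns the old value; the "memory" clobber above is what tells GCC that this store still happens even though *(a) is no longer listed as an output operand. A minimal sketch of a caller follows (an illustrative test-and-set lock, not the in-tree arch_spin_lock(); it assumes the lock word already has the alignment LDCW requires, e.g. obtained via __ldcw_align() from this same header):

static inline void example_ldcw_lock(volatile unsigned int *a)
{
	/* parisc lock-word convention: non-zero means free.  LDCW returns
	 * the old value and stores zero, so a non-zero return means we
	 * just took the lock. */
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();	/* spin on plain reads until it looks free */
}

static inline void example_ldcw_unlock(volatile unsigned int *a)
{
	mb();		/* order critical-section accesses before the release */
	*a = 1;		/* make the word non-zero again: lock is free */
}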
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc3f2c1..5822e8e200e6 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
}
#endif
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
-
- vfree(module_region);
}
/* Additional bytes needed in front of individual sections */
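
The hunk above drops the arch-specific module_free() in favour of the module_arch_freeing_init() hook: parisc now only releases its private section table, while freeing the module memory itself is left to the generic loader. Roughly, the generic side looks like the sketch below (simplified from kernel/module.c of that era; per-cpu freeing is omitted and exact call sites may differ between kernel versions):

/* Generic loader side (simplified sketch).  Architectures override the
 * weak hook when they keep per-module bookkeeping, as parisc does above. */
void __weak module_arch_freeing_init(struct module *mod)
{
	/* default: nothing to free */
}

static void module_deallocate(struct module *mod, struct load_info *info)
{
	module_arch_freeing_init(mod);		/* parisc: kfree(mod->arch.section) */
	module_memfree(mod->module_init);	/* generic free of init memory */
	module_memfree(mod->module_core);	/* generic free of core memory */
}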
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
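
The new branch matters because handle_mm_fault() can now return VM_FAULT_SIGSEGV (added in the same kernel release, e.g. when a stack guard-page check fails); any error bit the handler does not recognise falls through to BUG(). A sketch of the resulting dispatch with its surrounding context from the same function (illustrative; the labels out_of_memory and bad_area are the ones used by the parisc handler):

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* hand off to the OOM machinery */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* deliver SIGSEGV to the task */
		else if (fault & VM_FAULT_SIGBUS)
			goto bad_area;		/* signal raised from bad_area as well */
		BUG();				/* unknown error bit: oops */
	}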