-rw-r--r--	arch/sh/kernel/cpu/init.c	2
-rw-r--r--	arch/sh/kernel/sys_sh.c	92
-rw-r--r--	arch/sh/mm/mmap.c	94
3 files changed, 96 insertions, 92 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 75fb03d35670..d29e69c156f0 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -261,9 +261,11 @@ asmlinkage void __init sh_cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
+#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
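
What this guard protects: shm_align_mask only exists on MMU builds, and sh_cpu_init() widens it from the page size to the D-cache way size on parts whose cache ways span multiple pages, since virtual addresses that differ in the bits below the way size can alias in the cache. A minimal standalone sketch of the same computation (the 16KB way size is an illustrative assumption, not something stated by this commit):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		/* Hypothetical SH-4 style D-cache: 16KB per way (assumption). */
		unsigned long way_size = 16 * 1024;

		/*
		 * Same expression the patched sh_cpu_init() uses: the mask
		 * must cover the larger of the way size and the page size.
		 */
		unsigned long shm_align_mask = max_t(unsigned long,
						     way_size - 1,
						     PAGE_SIZE - 1);

		printf("shm_align_mask = %#lx\n", shm_align_mask);	/* 0x3fff */
		return 0;
	}

With a 4KB page size this yields a mask of 0x3fff, so shared mappings get 16KB colour alignment rather than plain page alignment.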
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 38f098c9c72d..58dfc02c7af1 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -22,102 +22,10 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/ipc.h>
-#include <asm/cacheflush.h>
 #include <asm/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
-
-#ifdef CONFIG_MMU
-/*
- * To avoid cache aliases, we map the shared page with same color.
- */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-	unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-	int do_colour_align;
-
-	if (flags & MAP_FIXED) {
-		/* We do not accept a shared mapping if it would violate
-		 * cache aliasing constraints.
-		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (unlikely(len > TASK_SIZE))
-		return -ENOMEM;
-
-	do_colour_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_colour_align = 1;
-
-	if (addr) {
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
-
-		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
-			return addr;
-	}
-
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
-}
-#endif /* CONFIG_MMU */
-
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	 unsigned long flags, int fd, unsigned long pgoff)
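
The COLOUR_ALIGN() helper that moves out of this file rounds a candidate address up to the next alias boundary, then adds the colour of the starting file page, so the mapping's offset within a cache way matches the file offset. A worked example, reusing the assumed 0x3fff mask from the sketch above (the address and page offset are likewise illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT	12

	/* Assumed alias mask for 16KB cache ways, as in the sketch above. */
	static unsigned long shm_align_mask = 0x3fff;

	/* The macro this commit moves into arch/sh/mm/mmap.c. */
	#define COLOUR_ALIGN(addr, pgoff)				\
		((((addr) + shm_align_mask) & ~shm_align_mask) +	\
		 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

	int main(void)
	{
		/*
		 * 0x10001000 rounds up to the 16KB boundary 0x10004000, and
		 * file page 3 contributes colour 3 << PAGE_SHIFT = 0x3000,
		 * so the candidate address becomes 0x10007000.
		 */
		printf("%#lx\n", COLOUR_ALIGN(0x10001000UL, 3UL));
		return 0;
	}

This is also why arch_get_unmapped_area() colours any file-backed or shared mapping (the filp || (flags & MAP_SHARED) test): those are the mappings that can appear at more than one virtual address and hence alias.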
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 8837d511710a..931f4d003fa0 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -9,7 +9,101 @@
  */
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
 #include <asm/page.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_MMU
+unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
+/*
+ * To avoid cache aliases, we map the shared page with same color.
+ */
+#define COLOUR_ALIGN(addr, pgoff)				\
+	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
+	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+	unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long start_addr;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		mm->cached_hole_size = 0;
+		start_addr = addr = TASK_UNMAPPED_BASE;
+	}
+
+full_search:
+	if (do_colour_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(mm->free_area_cache);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point: (!vma || addr < vma->vm_end). */
+		if (unlikely(TASK_SIZE - len < addr)) {
+			/*
+			 * Start a new search - just in case we missed
+			 * some holes.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (likely(!vma || addr + len <= vma->vm_start)) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		addr = vma->vm_end;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
+#endif /* CONFIG_MMU */
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This