Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       | 14 +++-----------
-rw-r--r--  mm/nommu.c       | 20 --------------------
-rw-r--r--  mm/page_alloc.c  |  5 +----
-rw-r--r--  mm/percpu.c      |  4 ----
-rw-r--r--  mm/sparse.c      | 15 ---------------
5 files changed, 4 insertions(+), 54 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index c782e8fb7235..d5004d82a1d6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -278,13 +278,6 @@ config BOUNCE
 	  by default when ZONE_DMA or HIGHMEM is selected, but you
 	  may say n to override this.
 
-# On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
-# have more than 4GB of memory, but we don't currently use the IOTLB to present
-# a 32-bit address to OHCI. So we need to use a bounce pool instead.
-config NEED_BOUNCE_POOL
-	bool
-	default y if TILE && USB_OHCI_HCD
-
 config NR_QUICK
 	int
 	depends on QUICKLIST
@@ -627,15 +620,14 @@ config GENERIC_EARLY_IOREMAP
 config MAX_STACK_SIZE_MB
 	int "Maximum user stack size for 32-bit processes (MB)"
 	default 80
-	range 8 256 if METAG
 	range 8 2048
 	depends on STACK_GROWSUP && (!64BIT || COMPAT)
 	help
 	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
 	  user processes when the stack grows upwards (currently only on parisc
-	  and metag arch). The stack will be located at the highest memory
-	  address minus the given value, unless the RLIMIT_STACK hard limit is
-	  changed to a smaller value in which case that is used.
+	  arch). The stack will be located at the highest memory address minus
+	  the given value, unless the RLIMIT_STACK hard limit is changed to a
+	  smaller value in which case that is used.
 
 	  A sane initial value is 80 MB.
 
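The help text above amounts to a clamp-and-subtract rule. As a hedged illustration only (the function and parameter names below are invented, not the parisc implementation), the placement can be sketched in standalone C:

#include <stdio.h>

/* Sketch of the MAX_STACK_SIZE_MB placement rule quoted above.
 * All names are illustrative; the real logic lives in arch code. */
static unsigned long stack_base(unsigned long highest_addr,
				unsigned long max_stack_bytes,
				unsigned long rlimit_hard)
{
	unsigned long size = max_stack_bytes;

	/* a smaller RLIMIT_STACK hard limit overrides the config value */
	if (rlimit_hard < size)
		size = rlimit_hard;

	/* the stack sits at the highest address minus the effective size */
	return highest_addr - size;
}

int main(void)
{
	/* 80 MB default vs. a 64 MB hard limit: the hard limit wins */
	printf("%#lx\n", stack_base(0xc0000000UL, 80UL << 20, 64UL << 20));
	return 0;
}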
diff --git a/mm/nommu.c b/mm/nommu.c
index ebb6e618dade..838a8fdec5c2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -663,22 +663,6 @@ static void put_nommu_region(struct vm_region *region)
 }
 
 /*
- * update protection on a vma
- */
-static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
-{
-#ifdef CONFIG_MPU
-	struct mm_struct *mm = vma->vm_mm;
-	long start = vma->vm_start & PAGE_MASK;
-	while (start < vma->vm_end) {
-		protect_page(mm, start, flags);
-		start += PAGE_SIZE;
-	}
-	update_protections(mm);
-#endif
-}
-
-/*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
@@ -695,8 +679,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 	vma->vm_mm = mm;
 
-	protect_vma(vma, vma->vm_flags);
-
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -757,8 +739,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	struct mm_struct *mm = vma->vm_mm;
 	struct task_struct *curr = current;
 
-	protect_vma(vma, 0);
-
 	mm->map_count--;
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		/* if the vma is cached, invalidate the entire cache */
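The context above ends inside the cache-lookup loop. As a rough standalone sketch of what that comment describes (the types and names below are invented stand-ins, not the kernel's vmacache API): once any per-task cache slot matches the VMA being deleted, the whole cache is dropped.

#include <stddef.h>

#define VMACACHE_SIZE 4	/* small per-task lookup cache, as in the kernel */

struct vma;	/* opaque for this sketch */

struct task {
	struct vma *vmacache[VMACACHE_SIZE];
};

/* If the dying VMA is cached anywhere, invalidate the entire cache. */
static void invalidate_if_cached(struct task *curr, struct vma *vma)
{
	for (size_t i = 0; i < VMACACHE_SIZE; i++) {
		if (curr->vmacache[i] == vma) {
			for (size_t j = 0; j < VMACACHE_SIZE; j++)
				curr->vmacache[j] = NULL;
			break;
		}
	}
}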
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1741dd23e7c1..4ea018263210 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6192,10 +6192,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = alloc_remap(pgdat->node_id, size);
-		if (!map)
-			map = memblock_virt_alloc_node_nopanic(size,
-							       pgdat->node_id);
+		map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 		pgdat->node_mem_map = map + offset;
 	}
 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
diff --git a/mm/percpu.c b/mm/percpu.c
index 9297098519a6..0b6480979ac7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2740,11 +2740,7 @@ void __init setup_per_cpu_areas(void)
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
 
-#ifdef CONFIG_CRIS
-#warning "the CRIS architecture has physical and virtual addresses confused"
-#else
 	pcpu_free_alloc_info(ai);
-#endif
 }
 
 #endif	/* CONFIG_SMP */
diff --git a/mm/sparse.c b/mm/sparse.c
index 79b26f98d793..58cab483e81b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -405,10 +405,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 	struct page *map;
 	unsigned long size;
 
-	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
-	if (map)
-		return map;
-
 	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 	map = memblock_virt_alloc_try_nid(size,
 					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -424,17 +420,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	unsigned long pnum;
 	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
 
-	map = alloc_remap(nodeid, size * map_count);
-	if (map) {
-		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-			if (!present_section_nr(pnum))
-				continue;
-			map_map[pnum] = map;
-			map += size;
-		}
-		return;
-	}
-
 	size = PAGE_ALIGN(size);
 	map = memblock_virt_alloc_try_nid_raw(size * map_count,
 					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
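The branch removed above was a straight slicing loop: one contiguous alloc_remap() block of size * map_count bytes was handed out in PAGES_PER_SECTION-sized pieces, skipping sections that are not present. A standalone sketch of that pattern (present(), slice_block(), and the constants are stand-ins for this illustration, not kernel API):

#include <stdbool.h>
#include <stdlib.h>

#define SECTIONS	8UL		/* illustrative number of memory sections */
#define SECTION_BYTES	(4096UL * 64)	/* stand-in for the per-section map size */

static bool present(unsigned long pnum)
{
	return pnum % 2 == 0;	/* pretend only even-numbered sections exist */
}

/* Carve one contiguous block into per-section map slices, advancing the
 * cursor only for sections that actually exist. */
static void slice_block(char *block, char *map_map[SECTIONS])
{
	char *map = block;

	for (unsigned long pnum = 0; pnum < SECTIONS; pnum++) {
		if (!present(pnum))
			continue;
		map_map[pnum] = map;
		map += SECTION_BYTES;
	}
}

int main(void)
{
	char *map_map[SECTIONS] = { 0 };
	char *block = malloc(SECTION_BYTES * SECTIONS);

	if (block)
		slice_block(block, map_map);
	free(block);
	return 0;
}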