Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/extable.c      |   1
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  | 201
-rw-r--r--  arch/sparc/mm/init_32.c      |  11
-rw-r--r--  arch/sparc/mm/init_64.c      | 260
-rw-r--r--  arch/sparc/mm/srmmu.c        |   6
-rw-r--r--  arch/sparc/mm/tlb.c          |  17
-rw-r--r--  arch/sparc/mm/tsb.c          |  61
7 files changed, 409 insertions, 148 deletions
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index 768a11e6bd4f..db214e9931d9 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -3,6 +3,7 @@
*/
#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/uaccess.h>
void sort_extable(struct exception_table_entry *start,
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8b1b80..e98a3f2e8f0f 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -28,6 +28,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
unsigned long pgoff,
unsigned long flags)
{
+ struct hstate *h = hstate_file(filp);
unsigned long task_size = TASK_SIZE;
struct vm_unmapped_area_info info;
@@ -38,7 +39,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
@@ -58,6 +59,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long pgoff,
const unsigned long flags)
{
+ struct hstate *h = hstate_file(filp);
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -69,7 +71,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
@@ -94,6 +96,7 @@ unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
+ struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long task_size = TASK_SIZE;
@@ -101,7 +104,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
- if (len & ~HPAGE_MASK)
+ if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > task_size)
return -ENOMEM;
@@ -113,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
if (addr) {
- addr = ALIGN(addr, HPAGE_SIZE);
+ addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
@@ -127,17 +130,141 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
pgoff, flags);
}
+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+ return entry;
+}
+
+static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+ unsigned long hugepage_size = _PAGE_SZ4MB_4V;
+
+ pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
+
+ switch (shift) {
+ case HPAGE_256MB_SHIFT:
+ hugepage_size = _PAGE_SZ256MB_4V;
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
+ case HPAGE_SHIFT:
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
+ case HPAGE_64K_SHIFT:
+ hugepage_size = _PAGE_SZ64K_4V;
+ break;
+ default:
+ WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
+ }
+
+ pte_val(entry) = pte_val(entry) | hugepage_size;
+ return entry;
+}
+
+static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+ if (tlb_type == hypervisor)
+ return sun4v_hugepage_shift_to_tte(entry, shift);
+ else
+ return sun4u_hugepage_shift_to_tte(entry, shift);
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writeable)
+{
+ unsigned int shift = huge_page_shift(hstate_vma(vma));
+
+ return hugepage_shift_to_tte(entry, shift);
+}
+
+static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
+{
+ unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
+ unsigned int shift;
+
+ switch (tte_szbits) {
+ case _PAGE_SZ256MB_4V:
+ shift = HPAGE_256MB_SHIFT;
+ break;
+ case _PAGE_SZ4MB_4V:
+ shift = REAL_HPAGE_SHIFT;
+ break;
+ case _PAGE_SZ64K_4V:
+ shift = HPAGE_64K_SHIFT;
+ break;
+ default:
+ shift = PAGE_SHIFT;
+ break;
+ }
+ return shift;
+}
+
+static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
+{
+ unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
+ unsigned int shift;
+
+ switch (tte_szbits) {
+ case _PAGE_SZ256MB_4U:
+ shift = HPAGE_256MB_SHIFT;
+ break;
+ case _PAGE_SZ4MB_4U:
+ shift = REAL_HPAGE_SHIFT;
+ break;
+ case _PAGE_SZ64K_4U:
+ shift = HPAGE_64K_SHIFT;
+ break;
+ default:
+ shift = PAGE_SHIFT;
+ break;
+ }
+ return shift;
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+ unsigned long shift;
+
+ if (tlb_type == hypervisor)
+ shift = sun4v_huge_tte_to_shift(entry);
+ else
+ shift = sun4u_huge_tte_to_shift(entry);
+
+ if (shift == PAGE_SHIFT)
+ WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
+ pte_val(entry));
+
+ return shift;
+}
+
+static unsigned long huge_tte_to_size(pte_t pte)
+{
+ unsigned long size = 1UL << huge_tte_to_shift(pte);
+
+ if (size == REAL_HPAGE_SIZE)
+ size = HPAGE_SIZE;
+ return size;
+}
+
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
+ pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
- if (pud)
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return NULL;
+
+ if (sz == PMD_SHIFT)
+ pte = (pte_t *)pmd;
+ else
+ pte = pte_alloc_map(mm, pmd, addr);
+ }
return pte;
}
@@ -146,49 +273,83 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
+ pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
- if (!pud_none(*pud))
- pte = (pte_t *)pmd_offset(pud, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ if (is_hugetlb_pmd(*pmd))
+ pte = (pte_t *)pmd;
+ else
+ pte = pte_offset_map(pmd, addr);
+ }
+ }
}
+
return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
+ unsigned int i, nptes, orig_shift, shift;
+ unsigned long size;
pte_t orig;
+ size = huge_tte_to_size(entry);
+ shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
+ nptes = size >> shift;
+
if (!pte_present(*ptep) && pte_present(entry))
- mm->context.hugetlb_pte_count++;
+ mm->context.hugetlb_pte_count += nptes;
- addr &= HPAGE_MASK;
+ addr &= ~(size - 1);
orig = *ptep;
- *ptep = entry;
+ orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
+
+ for (i = 0; i < nptes; i++)
+ ptep[i] = __pte(pte_val(entry) + (i << shift));
- /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
- maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
- maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
+ maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
+ /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+ if (size == HPAGE_SIZE)
+ maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
+ orig_shift);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
+ unsigned int i, nptes, hugepage_shift;
+ unsigned long size;
pte_t entry;
entry = *ptep;
+ size = huge_tte_to_size(entry);
+ if (size >= HPAGE_SIZE)
+ nptes = size >> PMD_SHIFT;
+ else
+ nptes = size >> PAGE_SHIFT;
+
+ hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
+ huge_tte_to_shift(entry);
+
if (pte_present(entry))
- mm->context.hugetlb_pte_count--;
+ mm->context.hugetlb_pte_count -= nptes;
- addr &= HPAGE_MASK;
- *ptep = __pte(0UL);
+ addr &= ~(size - 1);
+ for (i = 0; i < nptes; i++)
+ ptep[i] = __pte(0UL);
- /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
- maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
- maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
+ maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
+ /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+ if (size == HPAGE_SIZE)
+ maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
+ hugepage_shift);
return entry;
}
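
[Note] The hugetlbpage.c changes drop the single fixed HPAGE_SIZE assumption: the TTE size bits now come from the hstate's page shift, and set_huge_pte_at()/huge_ptep_get_and_clear() walk nptes consecutive page-table entries instead of writing one. Below is a minimal user-space sketch of that multi-entry write loop; PAGE_SHIFT, PMD_SHIFT, HPAGE_SIZE and the pte_t type are illustrative stand-ins, not the kernel definitions.

    /* Illustrative stand-in constants and types; the real ones come from
     * asm/page_64.h and asm/pgtable_64.h. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  13UL                 /* sparc64 base page: 8K */
    #define PMD_SHIFT   23UL                 /* one PMD maps 8M       */
    #define HPAGE_SIZE  (1UL << PMD_SHIFT)   /* default huge page: 8M */

    typedef uint64_t pte_t;

    /* Same loop shape as the new set_huge_pte_at(): one huge mapping is backed
     * by nptes consecutive entries, each advanced by the unit it maps. */
    static void write_huge_entries(pte_t *ptep, pte_t entry, uint64_t size)
    {
        unsigned int shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
        unsigned int nptes = size >> shift;
        unsigned int i;

        for (i = 0; i < nptes; i++)
            ptep[i] = entry + ((uint64_t)i << shift);
    }

    int main(void)
    {
        pte_t ptes[8] = { 0 };

        /* a 64K huge page is backed by 8 consecutive 8K entries */
        write_huge_entries(ptes, 0x40000000, 64UL << 10);
        printf("ptes[1] = %#llx\n", (unsigned long long)ptes[1]);
        return 0;
    }
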
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb8287155279..c6afe98de4d9 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -55,17 +55,6 @@ extern unsigned int sparc_ramdisk_size;
unsigned long highstart_pfn, highend_pfn;
-void show_mem(unsigned int filter)
-{
- printk("Mem-info:\n");
- show_free_areas(filter);
- printk("Free swap: %6ldkB\n",
- get_nr_swap_pages() << (PAGE_SHIFT-10));
- printk("%ld pages of RAM\n", totalram_pages);
- printk("%ld free pages\n", nr_free_pages());
-}
-
-
unsigned long last_valid_pfn;
unsigned long calc_highpages(void)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5d2f91511c60..ccd455328989 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,6 +324,50 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
tsb_insert(tsb, tag, tte);
}
+#ifdef CONFIG_HUGETLB_PAGE
+static int __init setup_hugepagesz(char *string)
+{
+ unsigned long long hugepage_size;
+ unsigned int hugepage_shift;
+ unsigned short hv_pgsz_idx;
+ unsigned int hv_pgsz_mask;
+ int rc = 0;
+
+ hugepage_size = memparse(string, &string);
+ hugepage_shift = ilog2(hugepage_size);
+
+ switch (hugepage_shift) {
+ case HPAGE_256MB_SHIFT:
+ hv_pgsz_mask = HV_PGSZ_MASK_256MB;
+ hv_pgsz_idx = HV_PGSZ_IDX_256MB;
+ break;
+ case HPAGE_SHIFT:
+ hv_pgsz_mask = HV_PGSZ_MASK_4MB;
+ hv_pgsz_idx = HV_PGSZ_IDX_4MB;
+ break;
+ case HPAGE_64K_SHIFT:
+ hv_pgsz_mask = HV_PGSZ_MASK_64K;
+ hv_pgsz_idx = HV_PGSZ_IDX_64K;
+ break;
+ default:
+ hv_pgsz_mask = 0;
+ }
+
+ if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+ pr_warn("hugepagesz=%llu not supported by MMU.\n",
+ hugepage_size);
+ goto out;
+ }
+
+ hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+ rc = 1;
+
+out:
+ return rc;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+#endif /* CONFIG_HUGETLB_PAGE */
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
struct mm_struct *mm;
@@ -347,7 +391,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
- is_hugetlb_pte(pte)) {
+ is_hugetlb_pmd(__pmd(pte_val(pte)))) {
/* We are fabricating 8MB pages using 4MB real hw pages. */
pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
@@ -785,13 +829,23 @@ static void __init find_ramdisk(unsigned long phys_base)
struct node_mem_mask {
unsigned long mask;
- unsigned long val;
+ unsigned long match;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
#ifdef CONFIG_NEED_MULTIPLE_NODES
+struct mdesc_mlgroup {
+ u64 node;
+ u64 latency;
+ u64 match;
+ u64 mask;
+};
+
+static struct mdesc_mlgroup *mlgroups;
+static int num_mlgroups;
+
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
@@ -802,78 +856,129 @@ struct mdesc_mblock {
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
-static int find_numa_node_for_addr(unsigned long pa,
- struct node_mem_mask *pnode_mask);
-static unsigned long __init ra_to_pa(unsigned long addr)
+static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
{
+ struct mdesc_mblock *m = NULL;
int i;
for (i = 0; i < num_mblocks; i++) {
- struct mdesc_mblock *m = &mblocks[i];
+ m = &mblocks[i];
if (addr >= m->base &&
addr < (m->base + m->size)) {
- addr += m->offset;
break;
}
}
- return addr;
+
+ return m;
}
-static int __init find_node(unsigned long addr)
+static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
- static bool search_mdesc = true;
- static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
- static int last_index;
- int i;
+ int prev_nid, new_nid;
- addr = ra_to_pa(addr);
- for (i = 0; i < num_node_masks; i++) {
- struct node_mem_mask *p = &node_masks[i];
+ prev_nid = -1;
+ for ( ; start < end; start += PAGE_SIZE) {
+ for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
+ struct node_mem_mask *p = &node_masks[new_nid];
- if ((addr & p->mask) == p->val)
- return i;
- }
- /* The following condition has been observed on LDOM guests because
- * node_masks only contains the best latency mask and value.
- * LDOM guest's mdesc can contain a single latency group to
- * cover multiple address range. Print warning message only if the
- * address cannot be found in node_masks nor mdesc.
- */
- if ((search_mdesc) &&
- ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
- /* find the available node in the mdesc */
- last_index = find_numa_node_for_addr(addr, &last_mem_mask);
- numadbg("find_node: latency group for address 0x%lx is %d\n",
- addr, last_index);
- if ((last_index < 0) || (last_index >= num_node_masks)) {
- /* WARN_ONCE() and use default group 0 */
- WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
- search_mdesc = false;
- last_index = 0;
+ if ((start & p->mask) == p->match) {
+ if (prev_nid == -1)
+ prev_nid = new_nid;
+ break;
+ }
}
+
+ if (new_nid == num_node_masks) {
+ prev_nid = 0;
+ WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
+ start);
+ break;
+ }
+
+ if (prev_nid != new_nid)
+ break;
}
+ *nid = prev_nid;
- return last_index;
+ return start > end ? end : start;
}
static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
- *nid = find_node(start);
- start += PAGE_SIZE;
- while (start < end) {
- int n = find_node(start);
+ u64 ret_end, pa_start, m_mask, m_match, m_end;
+ struct mdesc_mblock *mblock;
+ int _nid, i;
+
+ if (tlb_type != hypervisor)
+ return memblock_nid_range_sun4u(start, end, nid);
+
+ mblock = addr_to_mblock(start);
+ if (!mblock) {
+ WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
+ start);
+
+ _nid = 0;
+ ret_end = end;
+ goto done;
+ }
+
+ pa_start = start + mblock->offset;
+ m_match = 0;
+ m_mask = 0;
+
+ for (_nid = 0; _nid < num_node_masks; _nid++) {
+ struct node_mem_mask *const m = &node_masks[_nid];
- if (n != *nid)
+ if ((pa_start & m->mask) == m->match) {
+ m_match = m->match;
+ m_mask = m->mask;
break;
- start += PAGE_SIZE;
+ }
}
- if (start > end)
- start = end;
+ if (num_node_masks == _nid) {
+ /* We could not find NUMA group, so default to 0, but lets
+ * search for latency group, so we could calculate the correct
+ * end address that we return
+ */
+ _nid = 0;
+
+ for (i = 0; i < num_mlgroups; i++) {
+ struct mdesc_mlgroup *const m = &mlgroups[i];
+
+ if ((pa_start & m->mask) == m->match) {
+ m_match = m->match;
+ m_mask = m->mask;
+ break;
+ }
+ }
+
+ if (i == num_mlgroups) {
+ WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
+ start);
+
+ ret_end = end;
+ goto done;
+ }
+ }
- return start;
+ /*
+ * Each latency group has match and mask, and each memory block has an
+ * offset. An address belongs to a latency group if its address matches
+ * the following formula: ((addr + offset) & mask) == match
+ * It is, however, slow to check every single page if it matches a
+ * particular latency group. As optimization we calculate end value by
+ * using bit arithmetics.
+ */
+ m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
+ m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
+ ret_end = m_end > end ? end : m_end;
+
+done:
+ *nid = _nid;
+ return ret_end;
}
#endif
@@ -914,7 +1019,8 @@ static void init_node_masks_nonnuma(void)
numadbg("Initializing tables for non-numa.\n");
- node_masks[0].mask = node_masks[0].val = 0;
+ node_masks[0].mask = 0;
+ node_masks[0].match = 0;
num_node_masks = 1;
#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -932,15 +1038,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
-struct mdesc_mlgroup {
- u64 node;
- u64 latency;
- u64 match;
- u64 mask;
-};
-static struct mdesc_mlgroup *mlgroups;
-static int num_mlgroups;
-
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
u32 cfg_handle)
{
@@ -1029,6 +1126,10 @@ int of_node_to_nid(struct device_node *dp)
static void __init add_node_ranges(void)
{
struct memblock_region *reg;
+ unsigned long prev_max;
+
+memblock_resized:
+ prev_max = memblock.memory.max;
for_each_memblock(memory, reg) {
unsigned long size = reg->size;
@@ -1048,6 +1149,8 @@ static void __init add_node_ranges(void)
memblock_set_node(start, this_end - start,
&memblock.memory, nid);
+ if (memblock.memory.max != prev_max)
+ goto memblock_resized;
start = this_end;
}
}
@@ -1182,41 +1285,6 @@ int __node_distance(int from, int to)
return numa_latency[from][to];
}
-static int find_numa_node_for_addr(unsigned long pa,
- struct node_mem_mask *pnode_mask)
-{
- struct mdesc_handle *md = mdesc_grab();
- u64 node, arc;
- int i = 0;
-
- node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
- if (node == MDESC_NODE_NULL)
- goto out;
-
- mdesc_for_each_node_by_name(md, node, "group") {
- mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
- u64 target = mdesc_arc_target(md, arc);
- struct mdesc_mlgroup *m = find_mlgroup(target);
-
- if (!m)
- continue;
- if ((pa & m->mask) == m->match) {
- if (pnode_mask) {
- pnode_mask->mask = m->mask;
- pnode_mask->val = m->match;
- }
- mdesc_release(md);
- return i;
- }
- }
- i++;
- }
-
-out:
- mdesc_release(md);
- return -1;
-}
-
static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
int i;
@@ -1224,7 +1292,7 @@ static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
for (i = 0; i < MAX_NUMNODES; i++) {
struct node_mem_mask *n = &node_masks[i];
- if ((grp->mask == n->mask) && (grp->match == n->val))
+ if ((grp->mask == n->mask) && (grp->match == n->match))
break;
}
return i;
@@ -1279,10 +1347,10 @@ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
n = &node_masks[num_node_masks++];
n->mask = candidate->mask;
- n->val = candidate->match;
+ n->match = candidate->match;
- numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
- index, n->mask, n->val, candidate->latency);
+ numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
+ index, n->mask, n->match, candidate->latency);
return 0;
}
@@ -1379,7 +1447,7 @@ static int __init numa_parse_jbus(void)
numa_cpu_lookup_table[cpu] = index;
cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
node_masks[index].mask = ~((1UL << 36UL) - 1UL);
- node_masks[index].val = cpu << 36UL;
+ node_masks[index].match = cpu << 36UL;
index++;
}
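
[Note] The init_64.c NUMA rework replaces the per-page find_node() walk with memblock_nid_range(), which derives the end of a NUMA-homogeneous range directly from the latency-group match/mask and the mblock offset, as described in the new comment. A rough user-space sketch of that end-address arithmetic follows; the bit helpers, names and sample values are hypothetical stand-ins for the kernel's __ffs()/fls64().

    #include <stdio.h>
    #include <stdint.h>

    /* User-space stand-ins for the kernel bit helpers used in the new code. */
    static unsigned int my_ffs(uint64_t x)   { return __builtin_ctzll(x); }               /* like __ffs()  */
    static unsigned int my_fls64(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }  /* like fls64()  */

    /* End of the NUMA-homogeneous range containing pa_start, per the comment in
     * memblock_nid_range(): ((addr + offset) & mask) == match across the range. */
    static uint64_t nid_range_end(uint64_t pa_start, uint64_t end, uint64_t offset,
                                  uint64_t m_match, uint64_t m_mask)
    {
        uint64_t m_end;

        m_end  = m_match + (1ULL << my_ffs(m_mask)) - offset;
        m_end += pa_start & ~((1ULL << my_fls64(m_mask)) - 1);
        return m_end > end ? end : m_end;
    }

    int main(void)
    {
        /* Hypothetical latency group: bits 36..39 select the node. */
        uint64_t mask = 0xfULL << 36, match = 0x2ULL << 36;
        uint64_t pa = match + 0x1000;   /* offset 0 for simplicity */

        printf("range end: %#llx\n",
               (unsigned long long)nid_range_end(pa, ~0ULL, 0, match, mask));
        return 0;
    }
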
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c7f2a5295b3a..def82f6d626f 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1444,7 +1444,7 @@ static void poke_viking(void)
srmmu_set_mmureg(mreg);
}
-static struct sparc32_cachetlb_ops viking_ops = {
+static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
.cache_all = viking_flush_cache_all,
.cache_mm = viking_flush_cache_mm,
.cache_page = viking_flush_cache_page,
@@ -1475,7 +1475,7 @@ static struct sparc32_cachetlb_ops viking_ops = {
* flushes going at once will require SMP locking anyways so there's
* no real value in trying any harder than this.
*/
-static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
.cache_all = viking_flush_cache_all,
.cache_mm = viking_flush_cache_mm,
.cache_page = viking_flush_cache_page,
@@ -1759,7 +1759,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
local_ops->sig_insns(mm, insn_addr);
}
-static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
.cache_all = smp_flush_cache_all,
.cache_mm = smp_flush_cache_mm,
.cache_page = smp_flush_cache_page,
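
[Note] The srmmu.c hunks only mark the cachetlb ops tables __ro_after_init so they become read-only once boot completes. As a sketch of the idea (an approximation, not the kernel header verbatim), the marker amounts to placing the object in a dedicated section that the kernel write-protects after init:

    #include <stdio.h>

    /* Approximation of the kernel's __ro_after_init marker: a section name the
     * kernel write-protects after init; in plain user space it has no effect. */
    #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))

    static int boot_tunable __ro_after_init = 42;   /* writable only during init */

    int main(void)
    {
        printf("boot_tunable=%d\n", boot_tunable);
        return 0;
    }
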
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index c56a195c9071..afda3bbf7854 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
- bool exec, bool huge)
+ bool exec, unsigned int hugepage_shift)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
unsigned long nr;
@@ -84,19 +84,19 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
}
if (!tb->active) {
- flush_tsb_user_page(mm, vaddr, huge);
+ flush_tsb_user_page(mm, vaddr, hugepage_shift);
global_flush_tlb_page(mm, vaddr);
goto out;
}
if (nr == 0) {
tb->mm = mm;
- tb->huge = huge;
+ tb->hugepage_shift = hugepage_shift;
}
- if (tb->huge != huge) {
+ if (tb->hugepage_shift != hugepage_shift) {
flush_tlb_pending();
- tb->huge = huge;
+ tb->hugepage_shift = hugepage_shift;
nr = 0;
}
@@ -110,10 +110,9 @@ out:
}
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
- pte_t *ptep, pte_t orig, int fullmm)
+ pte_t *ptep, pte_t orig, int fullmm,
+ unsigned int hugepage_shift)
{
- bool huge = is_hugetlb_pte(orig);
-
if (tlb_type != hypervisor &&
pte_dirty(orig)) {
unsigned long paddr, pfn = pte_pfn(orig);
@@ -139,7 +138,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
no_cache_flush:
if (!fullmm)
- tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
+ tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
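
[Note] In tlb.c the batch now records a hugepage_shift instead of a single "huge" flag, so pending flushes are pushed out whenever the page size of incoming entries changes. A small sketch of that batching rule; the structure and names below are simplified stand-ins for the kernel's tlb_batch.

    #include <stdio.h>

    #define TLB_BATCH_NR 192    /* illustrative batch capacity */

    struct tlb_batch {
        unsigned int hugepage_shift;
        unsigned long tlb_nr;
        unsigned long vaddrs[TLB_BATCH_NR];
    };

    static void flush_pending(struct tlb_batch *tb)
    {
        printf("flush %lu vaddrs at shift %u\n", tb->tlb_nr, tb->hugepage_shift);
        tb->tlb_nr = 0;
    }

    /* Mirrors the new rule in tlb_batch_add_one(): an entry of a different page
     * size forces the pending batch out before the new address is queued. */
    static void batch_add(struct tlb_batch *tb, unsigned long vaddr,
                          unsigned int hugepage_shift)
    {
        if (tb->tlb_nr == 0)
            tb->hugepage_shift = hugepage_shift;
        else if (tb->hugepage_shift != hugepage_shift) {
            flush_pending(tb);
            tb->hugepage_shift = hugepage_shift;
        }

        tb->vaddrs[tb->tlb_nr++] = vaddr;
        if (tb->tlb_nr == TLB_BATCH_NR)
            flush_pending(tb);
    }

    int main(void)
    {
        struct tlb_batch tb = { .tlb_nr = 0 };

        batch_add(&tb, 0x10000, 13);    /* base 8K pages                       */
        batch_add(&tb, 0x12000, 13);
        batch_add(&tb, 0x800000, 23);   /* 8M entry flushes the 8K batch first */
        flush_pending(&tb);
        return 0;
    }
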
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index e20fbbafb0b0..23479c3d39f0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -86,6 +86,33 @@ static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
+ unsigned long hash_shift,
+ unsigned long nentries,
+ unsigned int hugepage_shift)
+{
+ unsigned int hpage_entries;
+ unsigned int i;
+
+ hpage_entries = 1 << (hugepage_shift - hash_shift);
+ for (i = 0; i < hpage_entries; i++)
+ __flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
+ nentries);
+}
+
+static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+ unsigned long tsb, unsigned long nentries,
+ unsigned int hugepage_shift)
+{
+ unsigned long i;
+
+ for (i = 0; i < tb->tlb_nr; i++)
+ __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
+ nentries, hugepage_shift);
+}
+#endif
+
void flush_tsb_user(struct tlb_batch *tb)
{
struct mm_struct *mm = tb->mm;
@@ -93,45 +120,61 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_lock_irqsave(&mm->context.lock, flags);
- if (!tb->huge) {
+ if (tb->hugepage_shift < HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+ if (tb->hugepage_shift == PAGE_SHIFT)
+ __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+ else
+ __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+ tb->hugepage_shift);
+#endif
}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+ __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
+ tb->hugepage_shift);
}
#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+ unsigned int hugepage_shift)
{
unsigned long nentries, base, flags;
spin_lock_irqsave(&mm->context.lock, flags);
- if (!huge) {
+ if (hugepage_shift < HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+ if (hugepage_shift == PAGE_SHIFT)
+ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+ nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+ else
+ __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+ nentries, hugepage_shift);
+#endif
}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+ __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
+ nentries, hugepage_shift);
}
#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
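
[Note] In tsb.c a single huge mapping can span several TSB slots when its page size exceeds the TSB's hash size, so the new __flush_huge_tsb_one_entry() invalidates 1 << (hugepage_shift - hash_shift) consecutive entries. A tiny illustration of that entry count; the shift values are the ones used in this patch and are listed here only as examples.

    #include <stdio.h>

    /* Number of TSB entries one huge mapping covers, as computed in
     * __flush_huge_tsb_one_entry(): 1 << (hugepage_shift - hash_shift). */
    static unsigned int hpage_entries(unsigned int hugepage_shift,
                                      unsigned int hash_shift)
    {
        return 1U << (hugepage_shift - hash_shift);
    }

    int main(void)
    {
        /* shifts: 8K base page = 13, 64K = 16, 4M real huge page = 22, 256M = 28 */
        printf("64K page in the 8K-hashed base TSB:  %u entries\n", hpage_entries(16, 13));
        printf("256M page in the 4M-hashed huge TSB: %u entries\n", hpage_entries(28, 22));
        return 0;
    }
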