From 3e91faec47e9e12b965c952d698b0bb64847af06 Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Tue, 24 Feb 2009 22:23:51 +0900
Subject: sh: fix P4 iounmap() pass-through

Fix iounmap() of pass-through P4 addresses. Without this patch,
iounmap() on the sh7780 RTC area results in a warning message.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 32946fba123e..8dc77026a0b5 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -119,7 +119,7 @@ void __iounmap(void __iomem *addr)
 	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
+	if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
 #ifdef CONFIG_32BIT
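The one-liner above matters because __iounmap() must pass through addresses
that were never page-table mapped: P1/P2 identity mappings, P4 control-space
addresses (such as the sh7780 RTC), and PCI windows. Only P3 addresses come
from __ioremap()'s page-table path and need VMA teardown. A compilable sketch
of that predicate, with segment bounds assumed for illustration rather than
taken from the kernel's addrspace.h:

    #include <stdbool.h>

    /* Assumed illustrative bounds, not the kernel's macros. */
    #define P3SEG_START 0xc0000000UL    /* P3: TLB-mapped window   */
    #define P3_ADDR_END 0xe0000000UL    /* P4 control space above  */

    /*
     * Only addresses inside the P3 window were established through
     * page tables by __ioremap(); everything else must be passed
     * through untouched.  Note the comparison uses the full virtual
     * address, as in the fix above, not a PXSEG()-masked segment.
     */
    static bool needs_vma_teardown(unsigned long vaddr)
    {
            return vaddr >= P3SEG_START && vaddr < P3_ADDR_END;
    }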
From 2f47f44790a9c8fc43e515df3c6be19a35ee5de5 Mon Sep 17 00:00:00 2001
From: Yoshihiro Shimoda
Date: Tue, 10 Mar 2009 15:49:54 +0900
Subject: sh: Support fixed 32-bit PMB mappings from bootloader.

This provides a method for supporting fixed PMB mappings inherited
from the bootloader, as an alternative to the dynamic PMB mapping
currently used by the kernel. In the future these methods will be
combined.

The P1/P2 area is handled like a regular 29-bit physical address, and
local bus devices are assigned P3 area addresses.

Signed-off-by: Yoshihiro Shimoda
Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig      | 29 ++++++++++++++++++++++++++++-
 arch/sh/mm/Makefile_32  |  1 +
 arch/sh/mm/ioremap_32.c |  6 ++++--
 arch/sh/mm/pmb-fixed.c  | 45 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 78 insertions(+), 3 deletions(-)
 create mode 100644 arch/sh/mm/pmb-fixed.c

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 555ec9714b9e..10c24356d2d5 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -57,7 +57,7 @@ config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB
+config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
@@ -67,6 +67,33 @@
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+choice
+	prompt "PMB handling type"
+	depends on PMB_ENABLE
+	default PMB_FIXED
+
+config PMB
+	bool "PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If you say Y here, physical addressing will be extended to
+	  32-bits through the SH-4A PMB. If this is not set, legacy
+	  29-bit physical addressing will be used.
+
+config PMB_FIXED
+	bool "fixed PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+		   CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If this option is enabled, fixed PMB mappings are inherited
+	  from the boot loader, and the kernel does not attempt dynamic
+	  management. This is the closest to legacy 29-bit physical mode,
+	  and allows systems to support up to 512MiB of system memory.
+
+endchoice
+
 config X2TLB
 	bool "Enable extended TLB mode"
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL

diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index cb2f3f299591..469ff1672451 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -35,6 +35,7 @@ endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
+obj-$(CONFIG_PMB_FIXED)	+= pmb-fixed.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 EXTRA_CFLAGS += -Werror

diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 8dc77026a0b5..60cc486d2c2c 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
 		return (void __iomem *)phys_addr;
 
+#if !defined(CONFIG_PMB_FIXED)
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	if (phys_addr < virt_to_phys(high_memory))
 		return NULL;
+#endif
 
 	/*
 	 * Mappings have to be page-aligned
@@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * First try to remap through the PMB once a valid VMA has been
 	 * established. Smaller allocations (or the rest of the size
@@ -122,7 +124,7 @@ void __iounmap(void __iomem *addr)
 	if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
 	 * mapping, then proceed with conventional VMA teardown.

diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c
new file mode 100644
index 000000000000..43c8eac4d8a1
--- /dev/null
+++ b/arch/sh/mm/pmb-fixed.c
@@ -0,0 +1,45 @@
+/*
+ * arch/sh/mm/fixed_pmb.c
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+
+static int __uses_jump_to_uncached fixed_pmb_init(void)
+{
+	int i;
+	unsigned long addr, data;
+
+	jump_to_uncached();
+
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
+
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
+	}
+
+	back_to_cached();
+
+	return 0;
+}
+arch_initcall(fixed_pmb_init);
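Note that fixed_pmb_init() above never creates or tears down mappings; it only
walks the data array the boot loader programmed and normalises the
cacheability bits of each valid entry to the kernel's configured cache policy.
That normalisation in isolation, as a compilable sketch (bit positions are
assumed for illustration, not taken from asm/mmu.h):

    #include <stdio.h>

    /* Assumed illustrative bit positions, not the kernel's values. */
    #define PMB_V   (1u << 8)   /* entry valid                      */
    #define PMB_C   (1u << 9)   /* cacheable                        */
    #define PMB_WT  (1u << 0)   /* write-through (vs. write-back)   */

    /*
     * Mirrors the loop body in fixed_pmb_init() for a
     * CONFIG_CACHE_WRITETHROUGH kernel: invalid entries are skipped,
     * cacheable ones are forced to the kernel's cache policy.
     */
    static unsigned long normalise(unsigned long data)
    {
            if (!(data & PMB_V))
                    return data;            /* like the "continue" */
            if (data & PMB_C)
                    data |= PMB_WT;         /* force write-through */
            return data;
    }

    int main(void)
    {
            printf("%#lx\n", normalise(PMB_V | PMB_C));  /* 0x301 */
            return 0;
    }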
From a83c0b739f3ad1887704cfa9f1ee5ee208cf1532 Mon Sep 17 00:00:00 2001
From: Francesco VIRLINZI
Date: Wed, 11 Mar 2009 10:39:02 +0000
Subject: sh: PMB hibernation support

This implements preliminary suspend/resume support for the PMB.

Signed-off-by: Francesco Virlinzi
Signed-off-by: Paul Mundt
---
 arch/sh/mm/pmb.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 84241676265e..b1a714a92b14 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -15,6 +15,8 @@
  */
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -402,3 +404,39 @@ static int __init pmb_debugfs_init(void)
 	return 0;
 }
 postcore_initcall(pmb_debugfs_init);
+
+#ifdef CONFIG_PM
+static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+	static pm_message_t prev_state;
+
+	/* Restore the PMB after a resume from hibernation */
+	if (state.event == PM_EVENT_ON &&
+	    prev_state.event == PM_EVENT_FREEZE) {
+		struct pmb_entry *pmbe;
+		spin_lock_irq(&pmb_list_lock);
+		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
+			set_pmb_entry(pmbe);
+		spin_unlock_irq(&pmb_list_lock);
+	}
+	prev_state = state;
+	return 0;
+}
+
+static int pmb_sysdev_resume(struct sys_device *dev)
+{
+	return pmb_sysdev_suspend(dev, PMSG_ON);
+}
+
+static struct sysdev_driver pmb_sysdev_driver = {
+	.suspend = pmb_sysdev_suspend,
+	.resume = pmb_sysdev_resume,
+};
+
+static int __init pmb_sysdev_init(void)
+{
+	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
+}
+
+subsys_initcall(pmb_sysdev_init);
+#endif
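The suspend hook above doubles as the restore path: set_pmb_entry() is
replayed over the kept entry list only on the FREEZE -> ON transition, i.e.
when thawing from hibernation, while an ordinary resume leaves the PMB
untouched. The transition test in isolation (event values are stand-ins for
the kernel's PM_EVENT_* constants, and the initial state is assumed):

    #include <stdbool.h>

    /* Stand-ins for the kernel's PM_EVENT_* values. */
    enum pm_event { EVENT_ON, EVENT_FREEZE, EVENT_SUSPEND };

    static enum pm_event prev_event = EVENT_ON;  /* assumed initial state */

    /*
     * Mirrors pmb_sysdev_suspend(): remember the previous event and
     * report whether this transition is a thaw from hibernation,
     * the only case where the PMB entries must be rewritten.
     */
    static bool pmb_needs_restore(enum pm_event event)
    {
            bool restore = (event == EVENT_ON && prev_event == EVENT_FREEZE);
            prev_event = event;
            return restore;
    }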
From 8263a67e169fdf0d06d172acbf6c03ae172a69d4 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 17 Mar 2009 17:49:49 +0900
Subject: sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores.

This adds support for extended ASIDs (up to 16 bits) on newer SH-X3
cores that implement the PTEAEX register and its associated
functionality. Presently this covers only the 65nm SH7786; the 90nm
part supports only legacy 8-bit ASIDs.

The main change is in how the PTE is written out when loading the
entry into the TLB, as well as in how the TLB entry is selectively
flushed. While SH-X2 extended mode splits out the memory-mapped U and
I-TLB data arrays for the extra bits, extended ASID mode splits out
the address arrays. While we don't use the memory-mapped data array
access, the address array accesses are necessary for selective TLB
flushes, so these are newly implemented here and replace the generic
SH-4 implementation.

With this, TLB flushes in switch_mm() are almost non-existent on newer
parts.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/Makefile_32  |  6 ++-
 arch/sh/mm/tlb-pteaex.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 2 deletions(-)
 create mode 100644 arch/sh/mm/tlb-pteaex.c

diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 469ff1672451..986a1e055834 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -25,8 +25,10 @@ obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
 endif
 
 ifdef CONFIG_MMU
-obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
+tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
+tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
+obj-y				+= $(tlb-y)
 ifndef CONFIG_CACHE_OFF
 obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o

diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
new file mode 100644
index 000000000000..5c9b2d781e08
--- /dev/null
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -0,0 +1,99 @@
+/*
+ * arch/sh/mm/tlb-pteaex.c
+ *
+ * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+void update_mmu_cache(struct vm_area_struct * vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+
+	/* Ptrace may call this routine. */
+	if (vma && current->active_mm != vma->vm_mm)
+		return;
+
+#ifndef CONFIG_CACHE_OFF
+	{
+		unsigned long pfn = pte_pfn(pte);
+
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			if (!test_bit(PG_mapped, &page->flags)) {
+				unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+				__flush_wback_region((void *)P1SEGADDR(phys),
+						     PAGE_SIZE);
+				__set_bit(PG_mapped, &page->flags);
+			}
+		}
+	}
+#endif
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = address & MMU_VPN_MASK;
+	__raw_writel(vpn, MMU_PTEH);
+
+	/* Set PTEAEX */
+	__raw_writel(get_asid(), MMU_PTEAEX);
+
+	pteval = pte.pte_low;
+
+	/* Set PTEA register */
+#ifdef CONFIG_X2TLB
+	/*
+	 * For the extended mode TLB this is trivial, only the ESZ and
+	 * EPR bits need to be written out to PTEA, with the remainder of
+	 * the protection bits (with the exception of the compat-mode SZ
+	 * and PR bits, which are cleared) being written out in PTEL.
+	 */
+	__raw_writel(pte.pte_high, MMU_PTEA);
+#else
+	/* TODO: make this look less hacky */
+	__raw_writel(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_CACHE_WRITETHROUGH
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	__raw_writel(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
+
+/*
+ * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
+ * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
+ * address arrays. In compat mode the second array is inaccessible, while
+ * in extended mode, the legacy 8-bit ASID field in address array 1 has
+ * undefined behaviour.
+ */
+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
+						 unsigned long page)
+{
+	jump_to_uncached();
+	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
+	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
+	back_to_cached();
+}
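The legacy #else branch flagged with the TODO packs PTE bits 31:29 down into
PTEA bits 3:1 and carries PTE bit 0 through as PTEA bit 0; the next commit
removes it, since PTEA has different semantics on PTEAEX parts. A worked
value makes the shift arithmetic concrete:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pteval = 0xa0000001u;  /* bits 31, 29 and 0 set */

            /* ((0xa0000001 >> 28) & 0xe) = 0xa, | 0x1 -> 0xb */
            uint32_t ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);

            printf("PTEA = %#x\n", ptea);   /* prints "PTEA = 0xb" */
            return 0;
    }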
From c54a43e90b80993b2e0772d678563cb2bc6a1b3b Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 17 Mar 2009 17:58:33 +0900
Subject: sh: tlb-pteaex: Kill off legacy PTEA updates.

While harmless, PTEA has different semantics on these parts, and is
only used in extended TLB mode. Kill off the legacy support.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlb-pteaex.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 5c9b2d781e08..2aab3ea934d7 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -64,9 +64,6 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	 * and PR bits, which are cleared) being written out in PTEL.
 	 */
 	__raw_writel(pte.pte_high, MMU_PTEA);
-#else
-	/* TODO: make this look less hacky */
-	__raw_writel(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
 #endif
 
 	/* Set PTEL register */

From 3a3b311ca375a37b29bb78b030f96bf97dee97f5 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 17 Mar 2009 17:59:31 +0900
Subject: sh: Update debugfs ASID dumping for 16-bit ASID support.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/asids-debugfs.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index 8e912a15e94f..cd8c3bf39b5a 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -37,10 +37,8 @@ static int asids_seq_show(struct seq_file *file, void *iter)
 			continue;
 
 		if (p->mm)
-			seq_printf(file, "%5d : %02lx\n", pid,
+			seq_printf(file, "%5d : %04lx\n", pid,
 				   cpu_asid(smp_processor_id(), p->mm));
-		else
-			seq_printf(file, "%5d : (none)\n", pid);
 	}
 
 	read_unlock(&tasklist_lock);
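The format change is the substance of the last fix: with PTEAEX parts exposing
ASIDs of up to 16 bits, the debugfs dump zero-pads to four hex digits instead
of two so the columns stay aligned. A compilable illustration of the two
formats (PID and ASID values invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned long asid8  = 0xa2;    /* legacy 8-bit ASID   */
            unsigned long asid16 = 0x01a2;  /* PTEAEX 16-bit ASID  */

            printf("%5d : %02lx\n", 4242, asid8);   /* " 4242 : a2"   */
            printf("%5d : %04lx\n", 4242, asid16);  /* " 4242 : 01a2" */
            return 0;
    }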