From d8db9bc55a316d97d0acdf6ccc9f43b370acd358 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 27 Sep 2016 22:10:16 +0800 Subject: selftests/powerpc: Add Anton's null_syscall benchmark to the selftests Pull in a version of Anton's null_syscall benchmark: http://ozlabs.org/~anton/junkcode/null_syscall.c Suggested-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Rui Teng Signed-off-by: Michael Ellerman --- .../selftests/powerpc/benchmarks/.gitignore | 3 +- .../testing/selftests/powerpc/benchmarks/Makefile | 2 +- .../selftests/powerpc/benchmarks/null_syscall.c | 157 +++++++++++++++++++++ 3 files changed, 160 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/powerpc/benchmarks/null_syscall.c diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore index bce49ebd869e..04dc1e6ef2ce 100644 --- a/tools/testing/selftests/powerpc/benchmarks/.gitignore +++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore @@ -1,4 +1,5 @@ gettimeofday context_switch mmap_bench -futex_bench \ No newline at end of file +futex_bench +null_syscall diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index a9adfb7de78f..545077f98f72 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile @@ -1,4 +1,4 @@ -TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench +TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall CFLAGS += -O2 diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c new file mode 100644 index 000000000000..ecc14d68e101 --- /dev/null +++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c @@ -0,0 +1,157 @@ +/* + * Test null syscall performance + * + * Copyright (C) 2009-2015 Anton Blanchard, IBM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define NR_LOOPS 10000000 + +#include +#include +#include +#include +#include +#include +#include +#include + +static volatile int soak_done; +unsigned long long clock_frequency; +unsigned long long timebase_frequency; +double timebase_multiplier; + +static inline unsigned long long mftb(void) +{ + unsigned long low; + + asm volatile("mftb %0" : "=r" (low)); + + return low; +} + +static void sigalrm_handler(int unused) +{ + soak_done = 1; +} + +/* + * Use a timer instead of busy looping on clock_gettime() so we don't + * pollute profiles with glibc and VDSO hits. + */ +static void cpu_soak_usecs(unsigned long usecs) +{ + struct itimerval val; + + memset(&val, 0, sizeof(val)); + val.it_value.tv_usec = usecs; + + signal(SIGALRM, sigalrm_handler); + setitimer(ITIMER_REAL, &val, NULL); + + while (1) { + if (soak_done) + break; + } + + signal(SIGALRM, SIG_DFL); +} + +/* + * This only works with recent kernels where cpufreq modifies + * /proc/cpuinfo dynamically. 
+ */ +static void get_proc_frequency(void) +{ + FILE *f; + char line[128]; + char *p, *end; + unsigned long v; + double d; + char *override; + + /* Try to get out of low power/low frequency mode */ + cpu_soak_usecs(0.25 * 1000000); + + f = fopen("/proc/cpuinfo", "r"); + if (f == NULL) + return; + + timebase_frequency = 0; + + while (fgets(line, sizeof(line), f) != NULL) { + if (strncmp(line, "timebase", 8) == 0) { + p = strchr(line, ':'); + if (p != NULL) { + v = strtoull(p + 1, &end, 0); + if (end != p + 1) + timebase_frequency = v; + } + } + + if (((strncmp(line, "clock", 5) == 0) || + (strncmp(line, "cpu MHz", 7) == 0))) { + p = strchr(line, ':'); + if (p != NULL) { + d = strtod(p + 1, &end); + if (end != p + 1) { + /* Find fastest clock frequency */ + if ((d * 1000000ULL) > clock_frequency) + clock_frequency = d * 1000000ULL; + } + } + } + } + + fclose(f); + + override = getenv("FREQUENCY"); + if (override) + clock_frequency = strtoull(override, NULL, 10); + + if (timebase_frequency) + timebase_multiplier = (double)clock_frequency + / timebase_frequency; + else + timebase_multiplier = 1; +} + +static void do_null_syscall(unsigned long nr) +{ + unsigned long i; + + for (i = 0; i < nr; i++) + getppid(); +} + +#define TIME(A, STR) \ + +int main(void) +{ + unsigned long tb_start, tb_now; + struct timespec tv_start, tv_now; + unsigned long long elapsed_ns, elapsed_tb; + + get_proc_frequency(); + + clock_gettime(CLOCK_MONOTONIC, &tv_start); + tb_start = mftb(); + + do_null_syscall(NR_LOOPS); + + clock_gettime(CLOCK_MONOTONIC, &tv_now); + tb_now = mftb(); + + elapsed_ns = (tv_now.tv_sec - tv_start.tv_sec) * 1000000000ULL + + (tv_now.tv_nsec - tv_start.tv_nsec); + elapsed_tb = tb_now - tb_start; + + printf("%10.2f ns %10.2f cycles\n", (float)elapsed_ns / NR_LOOPS, + (float)elapsed_tb * timebase_multiplier / NR_LOOPS); + + return 0; +} -- cgit From fda0440d82d38e603cd713e997fdfedb97e0ef0c Mon Sep 17 00:00:00 2001 From: Rui Teng Date: Thu, 25 Aug 2016 14:31:10 +0800 Subject: powerpc: Remove suspect CONFIG_PPC_BOOK3E #ifdefs in nohash/64/pgtable.h There are three #ifdef CONFIG_PPC_BOOK3E sections in nohash/64/pgtable.h. And there should be no configurations possible which use nohash/64/pgtable.h but don't also enable CONFIG_PPC_BOOK3E. 
Suggested-by: Michael Ellerman Signed-off-by: Rui Teng Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/nohash/64/pgtable.h | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 653a1838469d..e9e540a804fc 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -26,15 +26,11 @@ #else #define PMD_CACHE_INDEX PMD_INDEX_SIZE #endif + /* * Define the address range of the kernel non-linear virtual area */ - -#ifdef CONFIG_PPC_BOOK3E #define KERN_VIRT_START ASM_CONST(0x8000000000000000) -#else -#define KERN_VIRT_START ASM_CONST(0xD000000000000000) -#endif #define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) /* @@ -43,11 +39,7 @@ * (we keep a quarter for the virtual memmap) */ #define VMALLOC_START KERN_VIRT_START -#ifdef CONFIG_PPC_BOOK3E #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2) -#else -#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) -#endif #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) /* @@ -85,12 +77,8 @@ * Defines the address of the vmemap area, in its own region on * hash table CPUs and after the vmalloc space on Book3E */ -#ifdef CONFIG_PPC_BOOK3E #define VMEMMAP_BASE VMALLOC_END #define VMEMMAP_END KERN_IO_START -#else -#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT) -#endif #define vmemmap ((struct page *)VMEMMAP_BASE) -- cgit From f4329f2ecb149282fdfdd8830a936a56b1497a05 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 13 Oct 2016 14:43:52 +1100 Subject: powerpc/64s: Reduce exception alignment Exception handlers are aligned to 128 bytes (L1 cache) on 64s, which is overkill. It can reduce the icache footprint of any individual exception path. However taken as a whole, the expansion in icache footprint seems likely to be counter-productive and cause more total misses. Create IFETCH_ALIGN_SHIFT/BYTES, which should give optimal ifetch alignment with much more reasonable alignment. This saves 1792 bytes from head_64.o text with an allmodconfig build. Other subarchitectures should define appropriate IFETCH_ALIGN_SHIFT values if this becomes more widely used. 
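For reference, the pattern the patch introduces is small enough to restate here; the values match the cache.h hunk below, and any other subarchitecture would simply supply its own shift (sketch only, not an additional hunk):

	/* asm/cache.h (64-bit): instruction-fetch alignment, per the hunks below */
	#define IFETCH_ALIGN_SHIFT	4			/* POWER8,9 */
	#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

	/* assembly call sites then align hot entry points with:
	 *	.balign IFETCH_ALIGN_BYTES
	 */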
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cache.h | 3 +++ arch/powerpc/include/asm/head-64.h | 8 ++++---- arch/powerpc/kernel/exceptions-64s.S | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index ffbafbf76b19..7657aa897a38 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -20,12 +20,15 @@ #endif #else /* CONFIG_PPC64 */ #define L1_CACHE_SHIFT 7 +#define IFETCH_ALIGN_SHIFT 4 /* POWER8,9 */ #endif #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES +#define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT) + #if defined(__powerpc64__) && !defined(__ASSEMBLY__) struct ppc64_caches { u32 dsize; /* L1 d-cache size */ diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h index ab90c2fa1ea6..fca7033839a9 100644 --- a/arch/powerpc/include/asm/head-64.h +++ b/arch/powerpc/include/asm/head-64.h @@ -95,12 +95,12 @@ end_##sname: #define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align) \ USE_FIXED_SECTION(sname); \ - .align __align; \ + .balign __align; \ .global name; \ name: #define FIXED_SECTION_ENTRY_BEGIN(sname, name) \ - __FIXED_SECTION_ENTRY_BEGIN(sname, name, 0) + __FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES) #define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start) \ USE_FIXED_SECTION(sname); \ @@ -203,9 +203,9 @@ name: #define EXC_VIRT_END(name, start, end) \ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, end) -#define EXC_COMMON_BEGIN(name) \ +#define EXC_COMMON_BEGIN(name) \ USE_TEXT_SECTION(); \ - .align 7; \ + .balign IFETCH_ALIGN_BYTES; \ .global name; \ DEFINE_FIXED_SYMBOL(name); \ name: diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 08ba447a4b3d..50f0c5ab7e02 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1403,7 +1403,7 @@ USE_TEXT_SECTION() /* * Hash table stuff */ - .align 7 + .balign IFETCH_ALIGN_BYTES do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 andis. r0,r4,0xa410 /* weird error? */ -- cgit From e3f2c6c39371c0e67c7fd6ebcb8b74ca27ce95d7 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 14 Oct 2016 15:58:28 +1100 Subject: powerpc/asm: Allow including ppc_asm.h in asm files There's no reason to #error if we include ppc_asm.h in asm files, the ifdef already prevents any problems. 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc_asm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index c73750b0d9fa..28ab87e5b739 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -10,9 +10,7 @@ #include #include -#ifndef __ASSEMBLY__ -#error __FILE__ should only be used in assembler files -#else +#ifdef __ASSEMBLY__ #define SZL (BITS_PER_LONG/8) @@ -779,5 +777,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) .long 0xa6037b7d; /* mtsrr1 r11 */ \ .long 0x2400004c /* rfid */ #endif /* !CONFIG_PPC_BOOK3E */ + #endif /* __ASSEMBLY__ */ + #endif /* _ASM_POWERPC_PPC_ASM_H */ -- cgit From 9f751b82b491d06c6438066b511d44fa4cc49168 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 26 Oct 2016 14:51:12 +1100 Subject: powerpc/module: Add support for R_PPC64_REL32 relocations We haven't seen these before, but the soon to be merged relative exception tables support causes them to be generated. Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/module_64.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 183368e008cf..bb1807184bad 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -652,6 +652,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, *location = value - (unsigned long)location; break; + case R_PPC64_REL32: + /* 32 bits relative (used by relative exception tables) */ + *(u32 *)location = value - (unsigned long)location; + break; + case R_PPC64_TOCSAVE: /* * Marker reloc indicates we don't have to save r2. -- cgit From 06236f4efb926aa433e2cb3e36e8467f4a0e42c0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 1 Nov 2016 15:01:07 +1100 Subject: selftests/powerpc: Abort load_unaligned_zeropad on unhandled SEGV If the load unaligned zeropad test takes a SEGV which can't be handled, we increment segv_error, print the offending NIP and then return without taking any further action. In almost all cases this means we'll just take the SEGV again, and loop eternally spamming the console. Instead just abort(), it's a fatal error in the test. The test harness will notice that the child died and print a nice message for us. 
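Condensed, the handler flow after this change looks roughly like the sketch below; find_fixup() is a stand-in for the exception-table walk in the real test, which is visible in later hunks in this series:

	static void segv_handler(int signr, siginfo_t *info, void *ptr)
	{
		ucontext_t *uc = (ucontext_t *)ptr;
		unsigned long *ip = &UCONTEXT_NIA(uc);
		unsigned long fixup;

		/* search the test's private __ex_table for the faulting NIA */
		if (find_fixup(*ip, &fixup)) {
			*ip = fixup;	/* redirect NIA and resume */
			return;
		}

		printf("No exception table match for NIA %lx\n", *ip);
		abort();	/* fatal: the test harness reports the dead child */
	}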
Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c index 6cae06117b55..cf7a4a114a90 100644 --- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c @@ -73,7 +73,6 @@ extern char __stop___ex_table[]; #error implement UCONTEXT_NIA #endif -static int segv_error; static void segv_handler(int signr, siginfo_t *info, void *ptr) { @@ -95,7 +94,7 @@ static void segv_handler(int signr, siginfo_t *info, void *ptr) } printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr); - segv_error++; + abort(); } static void setup_segv_handler(void) @@ -145,8 +144,6 @@ static int test_body(void) for (i = 0; i < page_size; i++) FAIL_IF(do_one_test(mem_region+i, i)); - FAIL_IF(segv_error); - return 0; } -- cgit From 997e200182347d2cc7e37bc43eaafe249b4571b9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 1 Nov 2016 15:01:18 +1100 Subject: selftests/powerpc: Fail load_unaligned_zeropad on miscompare If the result returned by load_unaligned_zeropad() doesn't match what we expect we should fail the test! Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c index cf7a4a114a90..cd7af4e1b65a 100644 --- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c @@ -118,8 +118,10 @@ static int do_one_test(char *p, int page_offset) got = load_unaligned_zeropad(p); - if (should != got) + if (should != got) { printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should); + return 1; + } return 0; } -- cgit From 24bfa6a9e0d4fe414dfc4ad06c93e10c4c37194e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 13 Oct 2016 16:42:53 +1100 Subject: powerpc: EX_TABLE macro for exception tables This macro is taken from s390, and allows more flexibility in changing exception table format. mpe: Put it in ppc_asm.h and only define one version using stringinfy_in_c(). Add some empty definitions and headers to keep the selftests happy. 
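Usage from inline C follows the pattern in the uaccess.h hunk below: label the instruction that may fault, label the fixup, and let EX_TABLE() emit the table entry instead of open-coding the __ex_table section:

	#define __get_user_asm(x, addr, err, op)		\
		__asm__ __volatile__(				\
			"1:	" op " %1,0(%2)\n"		\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	li %0,%3\n"			\
			"	li %1,0\n"			\
			"	b 2b\n"				\
			".previous\n"				\
			EX_TABLE(1b, 3b)			\
			: "=r" (err), "=r" (x)			\
			: "b" (addr), "i" (-EFAULT), "0" (err))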
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/futex.h | 14 +- arch/powerpc/include/asm/io.h | 19 +- arch/powerpc/include/asm/ppc_asm.h | 10 + arch/powerpc/include/asm/uaccess.h | 25 +- arch/powerpc/include/asm/word-at-a-time.h | 6 +- arch/powerpc/lib/checksum_32.S | 47 ++-- arch/powerpc/lib/checksum_64.S | 20 +- arch/powerpc/lib/copy_32.S | 55 ++--- arch/powerpc/lib/copyuser_64.S | 271 ++++++++++----------- arch/powerpc/lib/copyuser_power7.S | 20 +- arch/powerpc/lib/ldstfp.S | 24 +- arch/powerpc/lib/sstep.c | 15 +- arch/powerpc/lib/string.S | 11 +- arch/powerpc/lib/string_64.S | 16 +- arch/powerpc/sysdev/fsl_rio.c | 6 +- arch/powerpc/sysdev/tsi108_pci.c | 6 +- .../selftests/powerpc/copyloops/asm/ppc_asm.h | 2 + .../selftests/powerpc/primitives/asm/firmware.h | 0 .../selftests/powerpc/primitives/asm/ppc_asm.h | 1 + .../selftests/powerpc/primitives/asm/processor.h | 0 .../selftests/powerpc/primitives/linux/stringify.h | 0 21 files changed, 241 insertions(+), 327 deletions(-) create mode 100644 tools/testing/selftests/powerpc/primitives/asm/firmware.h create mode 120000 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h create mode 100644 tools/testing/selftests/powerpc/primitives/asm/processor.h create mode 100644 tools/testing/selftests/powerpc/primitives/linux/stringify.h diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 2a9cf845473b..eaada6c92344 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -23,10 +23,8 @@ "4: li %1,%3\n" \ "b 3b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - ".align 3\n" \ - PPC_LONG "1b,4b,2b,4b\n" \ - ".previous" \ + EX_TABLE(1b, 4b) \ + EX_TABLE(2b, 4b) \ : "=&r" (oldval), "=&r" (ret) \ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "cr0", "memory") @@ -104,11 +102,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "3: .section .fixup,\"ax\"\n\ 4: li %0,%6\n\ b 3b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 3\n\ - " PPC_LONG "1b,4b,2b,4b\n\ - .previous" \ + .previous\n" + EX_TABLE(1b, 4b) + EX_TABLE(2b, 4b) : "+r" (ret), "=&r" (prev), "+m" (*uaddr) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index f6fda8482f60..5ed292431b5b 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -33,6 +33,7 @@ extern struct pci_dev *isa_bridge_pcidev; #include #include #include +#include #include @@ -458,13 +459,10 @@ static inline unsigned int name(unsigned int port) \ "5: li %0,-1\n" \ " b 4b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 2\n" \ - " .long 0b,5b\n" \ - " .long 1b,5b\n" \ - " .long 2b,5b\n" \ - " .long 3b,5b\n" \ - ".previous" \ + EX_TABLE(0b, 5b) \ + EX_TABLE(1b, 5b) \ + EX_TABLE(2b, 5b) \ + EX_TABLE(3b, 5b) \ : "=&r" (x) \ : "r" (port + _IO_BASE) \ : "memory"); \ @@ -479,11 +477,8 @@ static inline void name(unsigned int val, unsigned int port) \ "0:" op " %0,0,%1\n" \ "1: sync\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 2\n" \ - " .long 0b,2b\n" \ - " .long 1b,2b\n" \ - ".previous" \ + EX_TABLE(0b, 2b) \ + EX_TABLE(1b, 2b) \ : : "r" (val), "r" (port + _IO_BASE) \ : "memory"); \ } diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 28ab87e5b739..6af8852d1f7f 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -780,4 +780,14 @@ 
END_FTR_SECTION_IFCLR(CPU_FTR_601) #endif /* __ASSEMBLY__ */ +/* + * Helper macro for exception table entries + */ +#define EX_TABLE(_fault, _target) \ + stringify_in_c(.section __ex_table,"a";)\ + PPC_LONG_ALIGN stringify_in_c(;) \ + PPC_LONG stringify_in_c(_fault;) \ + PPC_LONG stringify_in_c(_target;) \ + stringify_in_c(.previous) + #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index c266227fdd5b..e0b724619c4a 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -132,10 +133,7 @@ extern long __put_user_bad(void); "3: li %0,%3\n" \ " b 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".previous" \ + EX_TABLE(1b, 3b) \ : "=r" (err) \ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) @@ -152,11 +150,8 @@ extern long __put_user_bad(void); "4: li %0,%3\n" \ " b 3b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,4b\n" \ - PPC_LONG "2b,4b\n" \ - ".previous" \ + EX_TABLE(1b, 4b) \ + EX_TABLE(2b, 4b) \ : "=r" (err) \ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) #endif /* __powerpc64__ */ @@ -215,10 +210,7 @@ extern long __get_user_bad(void); " li %1,0\n" \ " b 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".previous" \ + EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) @@ -237,11 +229,8 @@ extern long __get_user_bad(void); " li %1+1,0\n" \ " b 3b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,4b\n" \ - PPC_LONG "2b,4b\n" \ - ".previous" \ + EX_TABLE(1b, 4b) \ + EX_TABLE(2b, 4b) \ : "=r" (err), "=&r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) #endif /* __powerpc64__ */ diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 4afe66aa1400..f3f4710d4ff5 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -7,6 +7,7 @@ #include #include +#include #ifdef __BIG_ENDIAN__ @@ -193,10 +194,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) #endif "b 2b\n" ".previous\n" - ".section __ex_table,\"a\"\n\t" - PPC_LONG_ALIGN "\n\t" - PPC_LONG "1b,3b\n" - ".previous" + EX_TABLE(1b, 3b) : [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret) : [addr] "b" (addr), "m" (*(unsigned long *)addr)); diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index ea29a5d67743..9a671c774b22 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -103,17 +103,14 @@ EXPORT_SYMBOL(__csum_partial) adde r12,r12,r10 #define CSUM_COPY_16_BYTES_EXCODE(n) \ -.section __ex_table,"a"; \ - .align 2; \ - .long 8 ## n ## 0b,src_error; \ - .long 8 ## n ## 1b,src_error; \ - .long 8 ## n ## 2b,src_error; \ - .long 8 ## n ## 3b,src_error; \ - .long 8 ## n ## 4b,dst_error; \ - .long 8 ## n ## 5b,dst_error; \ - .long 8 ## n ## 6b,dst_error; \ - .long 8 ## n ## 7b,dst_error; \ - .text + EX_TABLE(8 ## n ## 0b, src_error); \ + EX_TABLE(8 ## n ## 1b, src_error); \ + EX_TABLE(8 ## n ## 2b, src_error); \ + EX_TABLE(8 ## n ## 3b, src_error); \ + EX_TABLE(8 ## n ## 4b, dst_error); \ + EX_TABLE(8 ## n ## 5b, dst_error); \ + EX_TABLE(8 ## n ## 6b, dst_error); \ + EX_TABLE(8 ## n ## 7b, dst_error); .text .stabs "arch/powerpc/lib/",N_SO,0,0,0f @@ 
-263,14 +260,11 @@ dst_error: stw r0,0(r8) blr - .section __ex_table,"a" - .align 2 - .long 70b,src_error - .long 71b,dst_error - .long 72b,src_error - .long 73b,dst_error - .long 54b,dst_error - .text + EX_TABLE(70b, src_error); + EX_TABLE(71b, dst_error); + EX_TABLE(72b, src_error); + EX_TABLE(73b, dst_error); + EX_TABLE(54b, dst_error); /* * this stuff handles faults in the cacheline loop and branches to either @@ -291,12 +285,11 @@ dst_error: #endif #endif - .section __ex_table,"a" - .align 2 - .long 30b,src_error - .long 31b,dst_error - .long 40b,src_error - .long 41b,dst_error - .long 50b,src_error - .long 51b,dst_error + EX_TABLE(30b, src_error); + EX_TABLE(31b, dst_error); + EX_TABLE(40b, src_error); + EX_TABLE(41b, dst_error); + EX_TABLE(50b, src_error); + EX_TABLE(51b, dst_error); + EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index fd9176671f9f..d0d311e108ff 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -182,34 +182,22 @@ EXPORT_SYMBOL(__csum_partial) .macro srcnr 100: - .section __ex_table,"a" - .align 3 - .llong 100b,.Lsrc_error_nr - .previous + EX_TABLE(100b,.Lsrc_error_nr) .endm .macro source 150: - .section __ex_table,"a" - .align 3 - .llong 150b,.Lsrc_error - .previous + EX_TABLE(150b,.Lsrc_error) .endm .macro dstnr 200: - .section __ex_table,"a" - .align 3 - .llong 200b,.Ldest_error_nr - .previous + EX_TABLE(200b,.Ldest_error_nr) .endm .macro dest 250: - .section __ex_table,"a" - .align 3 - .llong 250b,.Ldest_error - .previous + EX_TABLE(250b,.Ldest_error) .endm /* diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 40cce33b08d6..ff0d894d7ff9 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -49,17 +49,14 @@ 9 ## n ## 1: \ addi r5,r5,-(16 * n); \ b 105f; \ -.section __ex_table,"a"; \ - .align 2; \ - .long 8 ## n ## 0b,9 ## n ## 0b; \ - .long 8 ## n ## 1b,9 ## n ## 0b; \ - .long 8 ## n ## 2b,9 ## n ## 0b; \ - .long 8 ## n ## 3b,9 ## n ## 0b; \ - .long 8 ## n ## 4b,9 ## n ## 1b; \ - .long 8 ## n ## 5b,9 ## n ## 1b; \ - .long 8 ## n ## 6b,9 ## n ## 1b; \ - .long 8 ## n ## 7b,9 ## n ## 1b; \ - .text + EX_TABLE(8 ## n ## 0b,9 ## n ## 0b); \ + EX_TABLE(8 ## n ## 1b,9 ## n ## 0b); \ + EX_TABLE(8 ## n ## 2b,9 ## n ## 0b); \ + EX_TABLE(8 ## n ## 3b,9 ## n ## 0b); \ + EX_TABLE(8 ## n ## 4b,9 ## n ## 1b); \ + EX_TABLE(8 ## n ## 5b,9 ## n ## 1b); \ + EX_TABLE(8 ## n ## 6b,9 ## n ## 1b); \ + EX_TABLE(8 ## n ## 7b,9 ## n ## 1b) .text .stabs "arch/powerpc/lib/",N_SO,0,0,0f @@ -323,13 +320,10 @@ _GLOBAL(__copy_tofrom_user) 73: stwu r9,4(r6) bdnz 72b - .section __ex_table,"a" - .align 2 - .long 70b,100f - .long 71b,101f - .long 72b,102f - .long 73b,103f - .text + EX_TABLE(70b,100f) + EX_TABLE(71b,101f) + EX_TABLE(72b,102f) + EX_TABLE(73b,103f) 58: srwi. 
r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ clrlwi r5,r5,32-LG_CACHELINE_BYTES @@ -364,10 +358,7 @@ _GLOBAL(__copy_tofrom_user) 53: dcbt r3,r4 54: dcbz r11,r6 - .section __ex_table,"a" - .align 2 - .long 54b,105f - .text + EX_TABLE(54b,105f) /* the main body of the cacheline loop */ COPY_16_BYTES_WITHEX(0) #if L1_CACHE_BYTES >= 32 @@ -500,15 +491,13 @@ _GLOBAL(__copy_tofrom_user) bdnz 114b 120: blr - .section __ex_table,"a" - .align 2 - .long 30b,108b - .long 31b,109b - .long 40b,110b - .long 41b,111b - .long 130b,132b - .long 131b,120b - .long 112b,120b - .long 114b,120b - .text + EX_TABLE(30b,108b) + EX_TABLE(31b,109b) + EX_TABLE(40b,110b) + EX_TABLE(41b,111b) + EX_TABLE(130b,132b) + EX_TABLE(131b,120b) + EX_TABLE(112b,120b) + EX_TABLE(114b,120b) + EXPORT_SYMBOL(__copy_tofrom_user) diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 60386b2c99bb..aee6e24e81ab 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -394,70 +394,66 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 192: blr /* #bytes not copied in r3 */ - .section __ex_table,"a" - .align 3 - .llong 20b,120b - .llong 220b,320b - .llong 21b,121b - .llong 221b,321b - .llong 70b,170b - .llong 270b,370b - .llong 22b,122b - .llong 222b,322b - .llong 71b,171b - .llong 271b,371b - .llong 72b,172b - .llong 272b,372b - .llong 244b,344b - .llong 245b,345b - .llong 23b,123b - .llong 73b,173b - .llong 44b,144b - .llong 74b,174b - .llong 45b,145b - .llong 75b,175b - .llong 24b,124b - .llong 25b,125b - .llong 26b,126b - .llong 27b,127b - .llong 28b,128b - .llong 29b,129b - .llong 30b,130b - .llong 31b,131b - .llong 32b,132b - .llong 76b,176b - .llong 33b,133b - .llong 77b,177b - .llong 78b,178b - .llong 79b,179b - .llong 80b,180b - .llong 34b,134b - .llong 94b,194b - .llong 95b,195b - .llong 96b,196b - .llong 35b,135b - .llong 81b,181b - .llong 36b,136b - .llong 82b,182b - .llong 37b,137b - .llong 83b,183b - .llong 38b,138b - .llong 39b,139b - .llong 84b,184b - .llong 85b,185b - .llong 40b,140b - .llong 86b,186b - .llong 41b,141b - .llong 87b,187b - .llong 42b,142b - .llong 88b,188b - .llong 43b,143b - .llong 89b,189b - .llong 90b,190b - .llong 91b,191b - .llong 92b,192b - - .text + EX_TABLE(20b,120b) + EX_TABLE(220b,320b) + EX_TABLE(21b,121b) + EX_TABLE(221b,321b) + EX_TABLE(70b,170b) + EX_TABLE(270b,370b) + EX_TABLE(22b,122b) + EX_TABLE(222b,322b) + EX_TABLE(71b,171b) + EX_TABLE(271b,371b) + EX_TABLE(72b,172b) + EX_TABLE(272b,372b) + EX_TABLE(244b,344b) + EX_TABLE(245b,345b) + EX_TABLE(23b,123b) + EX_TABLE(73b,173b) + EX_TABLE(44b,144b) + EX_TABLE(74b,174b) + EX_TABLE(45b,145b) + EX_TABLE(75b,175b) + EX_TABLE(24b,124b) + EX_TABLE(25b,125b) + EX_TABLE(26b,126b) + EX_TABLE(27b,127b) + EX_TABLE(28b,128b) + EX_TABLE(29b,129b) + EX_TABLE(30b,130b) + EX_TABLE(31b,131b) + EX_TABLE(32b,132b) + EX_TABLE(76b,176b) + EX_TABLE(33b,133b) + EX_TABLE(77b,177b) + EX_TABLE(78b,178b) + EX_TABLE(79b,179b) + EX_TABLE(80b,180b) + EX_TABLE(34b,134b) + EX_TABLE(94b,194b) + EX_TABLE(95b,195b) + EX_TABLE(96b,196b) + EX_TABLE(35b,135b) + EX_TABLE(81b,181b) + EX_TABLE(36b,136b) + EX_TABLE(82b,182b) + EX_TABLE(37b,137b) + EX_TABLE(83b,183b) + EX_TABLE(38b,138b) + EX_TABLE(39b,139b) + EX_TABLE(84b,184b) + EX_TABLE(85b,185b) + EX_TABLE(40b,140b) + EX_TABLE(86b,186b) + EX_TABLE(41b,141b) + EX_TABLE(87b,187b) + EX_TABLE(42b,142b) + EX_TABLE(88b,188b) + EX_TABLE(43b,143b) + EX_TABLE(89b,189b) + EX_TABLE(90b,190b) + EX_TABLE(91b,191b) + EX_TABLE(92b,192b) /* * Routine to copy a whole page of data, 
optimized for POWER4. @@ -598,78 +594,77 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) li r5,4096 b .Ldst_aligned - .section __ex_table,"a" - .align 3 - .llong 20b,100b - .llong 21b,100b - .llong 22b,100b - .llong 23b,100b - .llong 24b,100b - .llong 25b,100b - .llong 26b,100b - .llong 27b,100b - .llong 28b,100b - .llong 29b,100b - .llong 30b,100b - .llong 31b,100b - .llong 32b,100b - .llong 33b,100b - .llong 34b,100b - .llong 35b,100b - .llong 36b,100b - .llong 37b,100b - .llong 38b,100b - .llong 39b,100b - .llong 40b,100b - .llong 41b,100b - .llong 42b,100b - .llong 43b,100b - .llong 44b,100b - .llong 45b,100b - .llong 46b,100b - .llong 47b,100b - .llong 48b,100b - .llong 49b,100b - .llong 50b,100b - .llong 51b,100b - .llong 52b,100b - .llong 53b,100b - .llong 54b,100b - .llong 55b,100b - .llong 56b,100b - .llong 57b,100b - .llong 58b,100b - .llong 59b,100b - .llong 60b,100b - .llong 61b,100b - .llong 62b,100b - .llong 63b,100b - .llong 64b,100b - .llong 65b,100b - .llong 66b,100b - .llong 67b,100b - .llong 68b,100b - .llong 69b,100b - .llong 70b,100b - .llong 71b,100b - .llong 72b,100b - .llong 73b,100b - .llong 74b,100b - .llong 75b,100b - .llong 76b,100b - .llong 77b,100b - .llong 78b,100b - .llong 79b,100b - .llong 80b,100b - .llong 81b,100b - .llong 82b,100b - .llong 83b,100b - .llong 84b,100b - .llong 85b,100b - .llong 86b,100b - .llong 87b,100b - .llong 88b,100b - .llong 89b,100b - .llong 90b,100b - .llong 91b,100b + EX_TABLE(20b,100b) + EX_TABLE(21b,100b) + EX_TABLE(22b,100b) + EX_TABLE(23b,100b) + EX_TABLE(24b,100b) + EX_TABLE(25b,100b) + EX_TABLE(26b,100b) + EX_TABLE(27b,100b) + EX_TABLE(28b,100b) + EX_TABLE(29b,100b) + EX_TABLE(30b,100b) + EX_TABLE(31b,100b) + EX_TABLE(32b,100b) + EX_TABLE(33b,100b) + EX_TABLE(34b,100b) + EX_TABLE(35b,100b) + EX_TABLE(36b,100b) + EX_TABLE(37b,100b) + EX_TABLE(38b,100b) + EX_TABLE(39b,100b) + EX_TABLE(40b,100b) + EX_TABLE(41b,100b) + EX_TABLE(42b,100b) + EX_TABLE(43b,100b) + EX_TABLE(44b,100b) + EX_TABLE(45b,100b) + EX_TABLE(46b,100b) + EX_TABLE(47b,100b) + EX_TABLE(48b,100b) + EX_TABLE(49b,100b) + EX_TABLE(50b,100b) + EX_TABLE(51b,100b) + EX_TABLE(52b,100b) + EX_TABLE(53b,100b) + EX_TABLE(54b,100b) + EX_TABLE(55b,100b) + EX_TABLE(56b,100b) + EX_TABLE(57b,100b) + EX_TABLE(58b,100b) + EX_TABLE(59b,100b) + EX_TABLE(60b,100b) + EX_TABLE(61b,100b) + EX_TABLE(62b,100b) + EX_TABLE(63b,100b) + EX_TABLE(64b,100b) + EX_TABLE(65b,100b) + EX_TABLE(66b,100b) + EX_TABLE(67b,100b) + EX_TABLE(68b,100b) + EX_TABLE(69b,100b) + EX_TABLE(70b,100b) + EX_TABLE(71b,100b) + EX_TABLE(72b,100b) + EX_TABLE(73b,100b) + EX_TABLE(74b,100b) + EX_TABLE(75b,100b) + EX_TABLE(76b,100b) + EX_TABLE(77b,100b) + EX_TABLE(78b,100b) + EX_TABLE(79b,100b) + EX_TABLE(80b,100b) + EX_TABLE(81b,100b) + EX_TABLE(82b,100b) + EX_TABLE(83b,100b) + EX_TABLE(84b,100b) + EX_TABLE(85b,100b) + EX_TABLE(86b,100b) + EX_TABLE(87b,100b) + EX_TABLE(88b,100b) + EX_TABLE(89b,100b) + EX_TABLE(90b,100b) + EX_TABLE(91b,100b) + EXPORT_SYMBOL(__copy_tofrom_user) diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index da0c568d18c4..a24b4039352c 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -29,35 +29,23 @@ .macro err1 100: - .section __ex_table,"a" - .align 3 - .llong 100b,.Ldo_err1 - .previous + EX_TABLE(100b,.Ldo_err1) .endm .macro err2 200: - .section __ex_table,"a" - .align 3 - .llong 200b,.Ldo_err2 - .previous + EX_TABLE(200b,.Ldo_err2) .endm #ifdef CONFIG_ALTIVEC .macro err3 300: - .section __ex_table,"a" - .align 3 - 
.llong 300b,.Ldo_err3 - .previous + EX_TABLE(300b,.Ldo_err3) .endm .macro err4 400: - .section __ex_table,"a" - .align 3 - .llong 400b,.Ldo_err4 - .previous + EX_TABLE(400b,.Ldo_err4) .endm diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index 5d0cdbfbe3f2..a58777c1b2cb 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S @@ -21,18 +21,12 @@ #define STKFRM (PPC_MIN_STKFRM + 16) - .macro extab instr,handler - .section __ex_table,"a" - PPC_LONG \instr,\handler - .previous - .endm - .macro inst32 op reg = 0 .rept 32 20: \op reg,0,r4 b 3f - extab 20b,99f + EX_TABLE(20b,99f) reg = reg + 1 .endr .endm @@ -100,7 +94,7 @@ _GLOBAL(do_lfs) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) /* Load FP reg N from double at *p. N is in r3, p in r4. */ _GLOBAL(do_lfd) @@ -127,7 +121,7 @@ _GLOBAL(do_lfd) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) /* Store FP reg N to float at *p. N is in r3, p in r4. */ _GLOBAL(do_stfs) @@ -154,7 +148,7 @@ _GLOBAL(do_stfs) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) /* Store FP reg N to double at *p. N is in r3, p in r4. */ _GLOBAL(do_stfd) @@ -181,7 +175,7 @@ _GLOBAL(do_stfd) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) #ifdef CONFIG_ALTIVEC /* Get the contents of vrN into v0; N is in r3. */ @@ -248,7 +242,7 @@ _GLOBAL(do_lvx) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) /* Store vector reg N to *p. N is in r3, p in r4. */ _GLOBAL(do_stvx) @@ -276,7 +270,7 @@ _GLOBAL(do_stvx) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX @@ -344,7 +338,7 @@ _GLOBAL(do_lxvd2x) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) /* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. 
*/ _GLOBAL(do_stxvd2x) @@ -372,7 +366,7 @@ _GLOBAL(do_stxvd2x) mr r3,r9 addi r1,r1,STKFRM blr - extab 2b,3b + EX_TABLE(2b,3b) #endif /* CONFIG_VSX */ diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 3362299b1859..b64287c6793f 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -493,10 +493,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long), "3: li %0,%4\n" \ " b 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".previous" \ + EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (cr) \ : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)) @@ -508,10 +505,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long), "3: li %0,%3\n" \ " b 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".previous" \ + EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ : "r" (addr), "i" (-EFAULT), "0" (err)) @@ -523,10 +517,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long), "3: li %0,%3\n" \ " b 2b\n" \ ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".previous" \ + EX_TABLE(1b, 3b) \ : "=r" (err) \ : "r" (addr), "i" (-EFAULT), "0" (err)) diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index d13e07603519..a787776822d8 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -13,8 +13,6 @@ #include #include - .section __ex_table,"a" - PPC_LONG_ALIGN .text /* This clears out any unused part of the destination buffer, @@ -125,10 +123,9 @@ _GLOBAL(__clear_user) 92: mfctr r3 blr - .section __ex_table,"a" - PPC_LONG 11b,90b - PPC_LONG 1b,91b - PPC_LONG 8b,92b - .text + EX_TABLE(11b, 90b) + EX_TABLE(1b, 91b) + EX_TABLE(8b, 92b) + EXPORT_SYMBOL(__clear_user) #endif diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 57ace356c949..c100f4d5d5d0 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -19,6 +19,7 @@ */ #include +#include #include #include @@ -41,26 +42,17 @@ PPC64_CACHES: .macro err1 100: - .section __ex_table,"a" - .align 3 - .llong 100b,.Ldo_err1 - .previous + EX_TABLE(100b,.Ldo_err1) .endm .macro err2 200: - .section __ex_table,"a" - .align 3 - .llong 200b,.Ldo_err2 - .previous + EX_TABLE(200b,.Ldo_err2) .endm .macro err3 300: - .section __ex_table,"a" - .align 3 - .llong 300b,.Ldo_err3 - .previous + EX_TABLE(300b,.Ldo_err3) .endm .Ldo_err1: diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 3cc7cace194a..87fee0c8eb21 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -80,10 +80,8 @@ "3: li %1,-1\n" \ " li %0,%3\n" \ " b 2b\n" \ - ".section __ex_table,\"a\"\n" \ - PPC_LONG_ALIGN "\n" \ - PPC_LONG "1b,3b\n" \ - ".text" \ + ".previous\n" \ + EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 57c971b7839c..53a16aa4d384 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -137,10 +137,8 @@ void tsi108_clear_pci_error(u32 pci_cfg_base) ".section .fixup,\"ax\"\n" \ "3: li %0,-1\n" \ " b 2b\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 2\n" \ - " .long 1b,3b\n" \ - ".text" \ + ".previous\n" \ + EX_TABLE(1b, 3b) \ : "=r"(x) : "r"(addr)) int diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h 
b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h index 50ae7d2091ce..80d34a9ffff4 100644 --- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h +++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h @@ -25,6 +25,8 @@ #define PPC_MTOCRF(A, B) mtocrf A, B +#define EX_TABLE(x, y) + FUNC_START(enter_vmx_usercopy) li r3,1 blr diff --git a/tools/testing/selftests/powerpc/primitives/asm/firmware.h b/tools/testing/selftests/powerpc/primitives/asm/firmware.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h new file mode 120000 index 000000000000..66c8193224e9 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h @@ -0,0 +1 @@ +../../../../../../arch/powerpc/include/asm/ppc_asm.h \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/primitives/asm/processor.h b/tools/testing/selftests/powerpc/primitives/asm/processor.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testing/selftests/powerpc/primitives/linux/stringify.h b/tools/testing/selftests/powerpc/primitives/linux/stringify.h new file mode 100644 index 000000000000..e69de29bb2d1 -- cgit From 61a92f703120daf7ed25e046275aa8a2d3085ad4 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 14 Oct 2016 16:47:31 +1100 Subject: powerpc: Add support for relative exception tables This halves the exception table size on 64-bit builds, and it allows build-time sorting of exception tables to work on relocated kernels. Signed-off-by: Nicholas Piggin [mpe: Minor asm fixups and bits to keep the selftests working] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc_asm.h | 6 ++--- arch/powerpc/include/asm/uaccess.h | 27 ++++++++++++++-------- arch/powerpc/kernel/kprobes.c | 2 +- arch/powerpc/kernel/traps.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/powerpc/platforms/embedded6xx/holly.c | 2 +- arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 2 +- arch/powerpc/sysdev/fsl_rio.c | 2 +- .../powerpc/primitives/load_unaligned_zeropad.c | 12 ++++++---- 9 files changed, 34 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6af8852d1f7f..bf9de5575ca9 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -785,9 +785,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) */ #define EX_TABLE(_fault, _target) \ stringify_in_c(.section __ex_table,"a";)\ - PPC_LONG_ALIGN stringify_in_c(;) \ - PPC_LONG stringify_in_c(_fault;) \ - PPC_LONG stringify_in_c(_target;) \ + stringify_in_c(.balign 4;) \ + stringify_in_c(.long (_fault) - . ;) \ + stringify_in_c(.long (_target) - . ;) \ stringify_in_c(.previous) #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index e0b724619c4a..a15d84d59356 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -64,23 +64,30 @@ __access_ok((__force unsigned long)(addr), (size), get_fs())) /* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is + * The exception table consists of pairs of relative addresses: the first is + * the address of an instruction that is allowed to fault, and the second is * the address at which the program should continue. 
No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. + * modified, so it is entirely up to the continuation code to figure out what + * to do. * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. + * All the routines below use bits of fixup code that are out of line with the + * main instruction path. This means when everything is well, we don't even + * have to jump over them. Further, they do not intrude on our cache or tlb + * entries. */ +#define ARCH_HAS_RELATIVE_EXTABLE + struct exception_table_entry { - unsigned long insn; - unsigned long fixup; + int insn; + int fixup; }; +static inline unsigned long extable_fixup(const struct exception_table_entry *x) +{ + return (unsigned long)&x->fixup + x->fixup; +} + /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e785cc9e1ecd..9479d8e360cf 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -449,7 +449,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) * zero, try to fix up. */ if ((entry = search_exception_tables(regs->nip)) != NULL) { - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return 1; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 023a462725b5..32c468b8b548 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -365,7 +365,7 @@ static inline int check_io_access(struct pt_regs *regs) (*nip & 0x100)? "OUT to": "IN from", regs->gpr[rb] - _IO_BASE, nip); regs->msr |= MSR_RI; - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return 1; } } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d0b137d96df1..73932f4a386e 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -512,7 +512,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) /* Are we prepared to handle this fault? 
*/ if ((entry = search_exception_tables(regs->nip)) != NULL) { - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return; } diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index dfd310031549..0409714e8070 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -263,7 +263,7 @@ static int ppc750_machine_check_exception(struct pt_regs *regs) if ((entry = search_exception_tables(regs->nip)) != NULL) { tsi108_clear_pci_cfg_error(); regs->msr |= MSR_RI; - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return 1; } return 0; diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index f97bab8e37a2..9de100e22bf3 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -174,7 +174,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs) if ((entry = search_exception_tables(regs->nip)) != NULL) { tsi108_clear_pci_cfg_error(); regs->msr |= MSR_RI; - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return 1; } return 0; diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 87fee0c8eb21..1c41c51f22cb 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -111,7 +111,7 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs) out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); regs->msr |= MSR_RI; - regs->nip = entry->fixup; + regs->nip = extable_fixup(entry); return 1; } } diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c index cd7af4e1b65a..ed3239bbfae2 100644 --- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c @@ -73,19 +73,23 @@ extern char __stop___ex_table[]; #error implement UCONTEXT_NIA #endif +struct extbl_entry { + int insn; + int fixup; +}; static void segv_handler(int signr, siginfo_t *info, void *ptr) { ucontext_t *uc = (ucontext_t *)ptr; unsigned long addr = (unsigned long)info->si_addr; unsigned long *ip = &UCONTEXT_NIA(uc); - unsigned long *ex_p = (unsigned long *)__start___ex_table; + struct extbl_entry *entry = (struct extbl_entry *)__start___ex_table; - while (ex_p < (unsigned long *)__stop___ex_table) { + while (entry < (struct extbl_entry *)__stop___ex_table) { unsigned long insn, fixup; - insn = *ex_p++; - fixup = *ex_p++; + insn = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; if (insn == *ip) { *ip = fixup; -- cgit From 5b9ff027859868efd63cdbbff5d30182d4cca50a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 13 Oct 2016 16:42:55 +1100 Subject: powerpc: Build-time sort the exception table Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/module.h | 4 ---- scripts/sortextable.c | 2 ++ 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 65fba4c34cd7..8567c6809240 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -80,6 +80,7 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK config PPC bool default y + select BUILDTIME_EXTABLE_SORT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select BINFMT_ELF diff --git a/arch/powerpc/include/asm/module.h 
b/arch/powerpc/include/asm/module.h index cd4ffd86765f..cc12c61ef315 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -90,10 +90,6 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec } #endif -struct exception_table_entry; -void sort_ex_table(struct exception_table_entry *start, - struct exception_table_entry *finish); - #if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64) #define ARCH_RELOCATES_KCRCTAB #define reloc_start PHYSICAL_START diff --git a/scripts/sortextable.c b/scripts/sortextable.c index f453b7ce99d6..365a907f98b3 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c @@ -316,6 +316,8 @@ do_file(char const *const fname) case EM_S390: case EM_AARCH64: case EM_PARISC: + case EM_PPC: + case EM_PPC64: custom_sort = sort_relative_table; break; case EM_ARCOMPACT: -- cgit From c0a5149105ab5f76f7c5a8fc2eb3d79fe3de6582 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 13 Oct 2016 13:07:14 +1100 Subject: powerpc: Make _ASM_NOKPROBE_SYMBOL a noop when KPROBES not defined Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc_asm.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index bf9de5575ca9..025833b8df9f 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -263,10 +263,14 @@ n: * latter is for those that incdentially must be excluded from probing * and allows them to be linked at more optimal location within text. */ +#ifdef CONFIG_KPROBES #define _ASM_NOKPROBE_SYMBOL(entry) \ .pushsection "_kprobe_blacklist","aw"; \ PPC_LONG (entry) ; \ .popsection +#else +#define _ASM_NOKPROBE_SYMBOL(entry) +#endif #define FUNC_START(name) _GLOBAL(name) #define FUNC_END(name) -- cgit From 9e607f72748ddc5620aeeb8d1f32f30c79b360b9 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Wed, 21 Sep 2016 14:34:58 +0300 Subject: i2c_powermac: shut up lockdep warning That's unclear why lockdep shows the following warning but adding a lockdep class to struct pmac_i2c_bus solves it [ 20.507795] ====================================================== [ 20.507796] [ INFO: possible circular locking dependency detected ] [ 20.507800] 4.8.0-rc7-00037-gd2ffb01 #21 Not tainted [ 20.507801] ------------------------------------------------------- [ 20.507803] swapper/0/1 is trying to acquire lock: [ 20.507818] (&bus->mutex){+.+.+.}, at: [] .pmac_i2c_open+0x30/0x100 [ 20.507819] [ 20.507819] but task is already holding lock: [ 20.507829] (&policy->rwsem){+.+.+.}, at: [] .cpufreq_online+0x1ac/0x9d0 [ 20.507830] [ 20.507830] which lock already depends on the new lock. 
[ 20.507830] [ 20.507832] [ 20.507832] the existing dependency chain (in reverse order) is: [ 20.507837] [ 20.507837] -> #4 (&policy->rwsem){+.+.+.}: [ 20.507844] [] .down_write+0x6c/0x110 [ 20.507849] [] .cpufreq_online+0x1ac/0x9d0 [ 20.507855] [] .subsys_interface_register+0xb8/0x110 [ 20.507860] [] .cpufreq_register_driver+0x1d0/0x250 [ 20.507866] [] .g5_cpufreq_init+0x9cc/0xa28 [ 20.507872] [] .do_one_initcall+0x5c/0x1d0 [ 20.507878] [] .kernel_init_freeable+0x1ac/0x28c [ 20.507883] [] .kernel_init+0x1c/0x140 [ 20.507887] [] .ret_from_kernel_thread+0x58/0x64 [ 20.507894] [ 20.507894] -> #3 (subsys mutex#2){+.+.+.}: [ 20.507899] [] .mutex_lock_nested+0xa8/0x590 [ 20.507903] [] .bus_probe_device+0x44/0xe0 [ 20.507907] [] .device_add+0x508/0x730 [ 20.507911] [] .register_cpu+0x118/0x190 [ 20.507916] [] .topology_init+0x148/0x248 [ 20.507921] [] .do_one_initcall+0x5c/0x1d0 [ 20.507925] [] .kernel_init_freeable+0x1ac/0x28c [ 20.507929] [] .kernel_init+0x1c/0x140 [ 20.507934] [] .ret_from_kernel_thread+0x58/0x64 [ 20.507939] [ 20.507939] -> #2 (cpu_add_remove_lock){+.+.+.}: [ 20.507944] [] .mutex_lock_nested+0xa8/0x590 [ 20.507950] [] .register_cpu_notifier+0x2c/0x70 [ 20.507955] [] .spawn_ksoftirqd+0x18/0x4c [ 20.507959] [] .do_one_initcall+0x5c/0x1d0 [ 20.507964] [] .kernel_init_freeable+0xb0/0x28c [ 20.507968] [] .kernel_init+0x1c/0x140 [ 20.507972] [] .ret_from_kernel_thread+0x58/0x64 [ 20.507978] [ 20.507978] -> #1 (&host->mutex){+.+.+.}: [ 20.507982] [] .mutex_lock_nested+0xa8/0x590 [ 20.507987] [] .kw_i2c_open+0x18/0x30 [ 20.507991] [] .pmac_i2c_open+0x94/0x100 [ 20.507995] [] .smp_core99_probe+0x260/0x410 [ 20.507999] [] .smp_prepare_cpus+0x280/0x2ac [ 20.508003] [] .kernel_init_freeable+0x88/0x28c [ 20.508008] [] .kernel_init+0x1c/0x140 [ 20.508012] [] .ret_from_kernel_thread+0x58/0x64 [ 20.508018] [ 20.508018] -> #0 (&bus->mutex){+.+.+.}: [ 20.508023] [] .lock_acquire+0x84/0x100 [ 20.508027] [] .mutex_lock_nested+0xa8/0x590 [ 20.508032] [] .pmac_i2c_open+0x30/0x100 [ 20.508037] [] .pmac_i2c_do_begin+0x34/0x120 [ 20.508040] [] .pmf_call_one+0x50/0xd0 [ 20.508045] [] .g5_pfunc_switch_volt+0x2c/0xc0 [ 20.508050] [] .g5_pfunc_switch_freq+0x1cc/0x1f0 [ 20.508054] [] .g5_cpufreq_target+0x2c/0x40 [ 20.508058] [] .__cpufreq_driver_target+0x23c/0x840 [ 20.508062] [] .cpufreq_gov_performance_limits+0x18/0x30 [ 20.508067] [] .cpufreq_start_governor+0xac/0x100 [ 20.508071] [] .cpufreq_set_policy+0x208/0x260 [ 20.508076] [] .cpufreq_init_policy+0x6c/0xb0 [ 20.508081] [] .cpufreq_online+0x250/0x9d0 [ 20.508085] [] .subsys_interface_register+0xb8/0x110 [ 20.508090] [] .cpufreq_register_driver+0x1d0/0x250 [ 20.508094] [] .g5_cpufreq_init+0x9cc/0xa28 [ 20.508099] [] .do_one_initcall+0x5c/0x1d0 [ 20.508103] [] .kernel_init_freeable+0x1ac/0x28c [ 20.508107] [] .kernel_init+0x1c/0x140 [ 20.508112] [] .ret_from_kernel_thread+0x58/0x64 [ 20.508113] [ 20.508113] other info that might help us debug this: [ 20.508113] [ 20.508121] Chain exists of: [ 20.508121] &bus->mutex --> subsys mutex#2 --> &policy->rwsem [ 20.508121] [ 20.508123] Possible unsafe locking scenario: [ 20.508123] [ 20.508124] CPU0 CPU1 [ 20.508125] ---- ---- [ 20.508128] lock(&policy->rwsem); [ 20.508132] lock(subsys mutex#2); [ 20.508135] lock(&policy->rwsem); [ 20.508138] lock(&bus->mutex); [ 20.508139] [ 20.508139] *** DEADLOCK *** [ 20.508139] [ 20.508141] 3 locks held by swapper/0/1: [ 20.508150] #0: (cpu_hotplug.lock){++++++}, at: [] .get_online_cpus+0x48/0xc0 [ 20.508159] #1: (subsys mutex#2){+.+.+.}, at: [] 
.subsys_interface_register+0x50/0x110 [ 20.508168] #2: (&policy->rwsem){+.+.+.}, at: [] .cpufreq_online+0x1ac/0x9d0 [ 20.508169] [ 20.508169] stack backtrace: [ 20.508173] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.8.0-rc7-00037-gd2ffb01 #21 [ 20.508175] Call Trace: [ 20.508180] [c0000000790c2b90] [c00000000082cc70] .dump_stack+0xe0/0x14c (unreliable) [ 20.508184] [c0000000790c2c20] [c000000000828c88] .print_circular_bug+0x350/0x388 [ 20.508188] [c0000000790c2cd0] [c0000000000ecb0c] .__lock_acquire+0x196c/0x1d30 [ 20.508192] [c0000000790c2e50] [c0000000000ed5b4] .lock_acquire+0x84/0x100 [ 20.508196] [c0000000790c2f20] [c000000000820448] .mutex_lock_nested+0xa8/0x590 [ 20.508201] [c0000000790c3030] [c000000000052830] .pmac_i2c_open+0x30/0x100 [ 20.508206] [c0000000790c30c0] [c000000000052e14] .pmac_i2c_do_begin+0x34/0x120 [ 20.508209] [c0000000790c3150] [c000000000056bc0] .pmf_call_one+0x50/0xd0 [ 20.508213] [c0000000790c31e0] [c00000000068ff1c] .g5_pfunc_switch_volt+0x2c/0xc0 [ 20.508217] [c0000000790c3250] [c00000000068fecc] .g5_pfunc_switch_freq+0x1cc/0x1f0 [ 20.508221] [c0000000790c3320] [c00000000068fc2c] .g5_cpufreq_target+0x2c/0x40 [ 20.508226] [c0000000790c3390] [c0000000006873ec] .__cpufreq_driver_target+0x23c/0x840 [ 20.508230] [c0000000790c3440] [c00000000068c798] .cpufreq_gov_performance_limits+0x18/0x30 [ 20.508235] [c0000000790c34b0] [c00000000068915c] .cpufreq_start_governor+0xac/0x100 [ 20.508239] [c0000000790c3530] [c00000000068a788] .cpufreq_set_policy+0x208/0x260 [ 20.508244] [c0000000790c35d0] [c00000000068abdc] .cpufreq_init_policy+0x6c/0xb0 [ 20.508249] [c0000000790c3940] [c00000000068ae70] .cpufreq_online+0x250/0x9d0 [ 20.508253] [c0000000790c3a30] [c0000000004d76d8] .subsys_interface_register+0xb8/0x110 [ 20.508258] [c0000000790c3ad0] [c000000000689bb0] .cpufreq_register_driver+0x1d0/0x250 [ 20.508262] [c0000000790c3b60] [c000000000b4f8f4] .g5_cpufreq_init+0x9cc/0xa28 [ 20.508267] [c0000000790c3c20] [c00000000000a98c] .do_one_initcall+0x5c/0x1d0 [ 20.508271] [c0000000790c3d00] [c000000000b0f86c] .kernel_init_freeable+0x1ac/0x28c [ 20.508276] [c0000000790c3db0] [c00000000000b3bc] .kernel_init+0x1c/0x140 [ 20.508280] [c0000000790c3e30] [c0000000000098f4] .ret_from_kernel_thread+0x58/0x64 Signed-off-by: Denis Kirjanov Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powermac/low_i2c.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index c8c217b7dd33..f627c9fd7b48 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -90,6 +90,7 @@ struct pmac_i2c_bus int opened; int polled; /* open mode */ struct platform_device *platform_dev; + struct lock_class_key lock_key; /* ops */ int (*open)(struct pmac_i2c_bus *bus); @@ -587,6 +588,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, bus->close = kw_i2c_close; bus->xfer = kw_i2c_xfer; mutex_init(&bus->mutex); + lockdep_set_class(&bus->mutex, &bus->lock_key); if (controller == busnode) bus->flags = pmac_i2c_multibus; list_add(&bus->link, &pmac_i2c_busses); @@ -815,6 +817,7 @@ static void __init pmu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = pmu_i2c_xfer; mutex_init(&bus->mutex); + lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = pmac_i2c_multibus; list_add(&bus->link, &pmac_i2c_busses); @@ -938,6 +941,7 @@ static void __init smu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = smu_i2c_xfer; mutex_init(&bus->mutex); + 
lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = 0; list_add(&bus->link, &pmac_i2c_busses); -- cgit From 5246adec59458b5d325b8e1462ea9ef3ead7f6ae Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sat, 1 Oct 2016 20:41:56 +1000 Subject: powerpc/pseries: Use H_CLEAR_HPT to clear MMU hash table during kexec An hcall was recently added that does exactly what we need during kexec - it clears the entire MMU hash table, ignoring any VRMA mappings. Try it and fall back to the old method if we get a failure. On a POWER8 box with 5TB of memory, this reduces the time it takes to kexec a new kernel from from 4 minutes to 1 minute. Signed-off-by: Anton Blanchard Tested-by: Mahesh Salgaonkar [mpe: Split into separate functions and tweak function naming] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 3 ++- arch/powerpc/platforms/pseries/lpar.c | 24 ++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 708edebcf147..489748ed21b4 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -275,7 +275,8 @@ #define H_COP 0x304 #define H_GET_MPP_X 0x314 #define H_SET_MODE 0x31C -#define MAX_HCALL_OPCODE H_SET_MODE +#define H_CLEAR_HPT 0x358 +#define MAX_HCALL_OPCODE H_CLEAR_HPT /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index aa35245d8d6d..24ad43afbb46 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -221,7 +221,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) return -1; } -static void pSeries_lpar_hptab_clear(void) +static void manual_hpte_clear_all(void) { unsigned long size_bytes = 1UL << ppc64_pft_size; unsigned long hpte_count = size_bytes >> 4; @@ -249,6 +249,26 @@ static void pSeries_lpar_hptab_clear(void) &(ptes[j].pteh), &(ptes[j].ptel)); } } +} + +static int hcall_hpte_clear_all(void) +{ + int rc; + + do { + rc = plpar_hcall_norets(H_CLEAR_HPT); + } while (rc == H_CONTINUE); + + return rc; +} + +static void pseries_hpte_clear_all(void) +{ + int rc; + + rc = hcall_hpte_clear_all(); + if (rc != H_SUCCESS) + manual_hpte_clear_all(); #ifdef __LITTLE_ENDIAN__ /* @@ -598,7 +618,7 @@ void __init hpte_init_pseries(void) mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove; mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted; mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; - mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear; + mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; } -- cgit From 70839d207792aa348f013c733e8853c1bd3bc4f0 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 14 Oct 2016 18:31:33 +1100 Subject: powerpc/64: Add an option to force run-at-load to test relocation This adds a config option that can help exercise the case when the kernel is not running at PAGE_OFFSET. 
Signed-off-by: Nicholas Piggin Reviewed-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 9 +++++++++ arch/powerpc/kernel/head_64.S | 9 ++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8567c6809240..b49062b060d2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -479,6 +479,15 @@ config RELOCATABLE setting can still be useful to bootwrappers that need to know the load address of the kernel (eg. u-boot/mkimage). +config RELOCATABLE_TEST + bool "Test relocatable kernel" + depends on (PPC64 && RELOCATABLE) + default n + help + This runs the relocatable kernel at the address it was initially + loaded at, which tends to be non-zero and therefore test the + relocation code. + config CRASH_DUMP bool "Build a kdump crash kernel" depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 04c546e20cc0..451a8e1cf57b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -107,12 +107,19 @@ __secondary_hold_acknowledge: * crash_kernel region. The loader is responsible for * observing the alignment requirement. */ + +#ifdef CONFIG_RELOCATABLE_TEST +#define RUN_AT_LOAD_DEFAULT 1 /* Test relocation, do not copy to 0 */ +#else +#define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "run0" -- relocate to 0 by default */ +#endif + /* Do not move this variable as kexec-tools knows about it. */ . = 0x5c .globl __run_at_load __run_at_load: DEFINE_FIXED_SYMBOL(__run_at_load) - .long 0x72756e30 /* "run0" -- relocate to 0 by default */ + .long RUN_AT_LOAD_DEFAULT #endif . = 0x60 -- cgit From 3a849815a13695ad0e32f6f01c90544922a72229 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 20 Oct 2016 16:00:14 +1100 Subject: powerpc/book3s64: Always build for power4 or later When we're not compiling for a specific CPU, ie. none of the CONFIG_POWERx_CPU options are set, and CONFIG_GENERIC_CPU *is* set, we currently don't pass any -mcpu option to the compiler. This means the compiler builds for a "generic" Power CPU. But back in 2014 we dropped support for pre power4 CPUs in commit 468a33028edd ("powerpc: Drop support for pre-POWER4 cpus"). Given that, there's no point in building the kernel to run on pre power4 cpus. So update the flags we pass to the compiler when CONFIG_GENERIC_CPU is set, to specify -mcpu=power4. 
Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 617dece67924..041fda1e2a5d 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -121,6 +121,7 @@ CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) ifeq ($(CONFIG_PPC_BOOK3S_64),y) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) +CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif -- cgit From f923efbcfdbaa4391874eeda676b08fcf9ad6e99 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 28 Sep 2016 17:25:52 +1000 Subject: powerpc/hash64: Be more careful when generating tlbiel In ISA v2.05, the tlbiel instruction takes two arguments, RB and L: tlbiel RB,L +---------+---------+----+---------+---------+---------+----+ | 31 | / | L | / | RB | 274 | / | | 31 - 26 | 25 - 22 | 21 | 20 - 16 | 15 - 11 | 10 - 1 | 0 | +---------+---------+----+---------+---------+---------+----+ In ISA v2.06 tlbiel takes only one argument, RB: tlbiel RB +---------+---------+---------+---------+---------+----+ | 31 | / | / | RB | 274 | / | | 31 - 26 | 25 - 21 | 20 - 16 | 15 - 11 | 10 - 1 | 0 | +---------+---------+---------+---------+---------+----+ And in ISA v3.00 tlbiel takes five arguments: tlbiel RB,RS,RIC,PRS,R +---------+---------+----+---------+----+----+---------+---------+----+ | 31 | RS | / | RIC |PRS | R | RB | 274 | / | | 31 - 26 | 25 - 21 | 20 | 19 - 18 | 17 | 16 | 15 - 11 | 10 - 1 | 0 | +---------+---------+----+---------+----+----+---------+---------+----+ However the assembler also accepts "tlbiel RB", and generates "tlbiel RB,r0,0,0,0". As you can see above the L field from the v2.05 encoding overlaps with the reserved field of the v2.06 encoding, and the low bit of the RS field of the v3.00 encoding. Currently in __tlbiel() we generate two tlbiel instructions manually using hex constants. In the first case, for MMU_PAGE_4K, we generate "tlbiel RB,0", which is safe in all cases, because the L bit is zero. However in the default case we generate "tlbiel RB,1", therefore setting bit 21 to 1. This is not an actual bug on v2.06 processors, because the CPU ignores the value of the reserved field. However software is supposed to encode the reserved fields as zero to enable forward compatibility. On v3.00 processors setting bit 21 to 1 and no other bits of RS, means we are using r1 for the value of RS. Although it's not obvious, the code sets the IS field (bits 10-11) to 0 (by omission), and L=1, in the va value, which is passed as RB. We also pass R=0 in the instruction. The combination of IS=0, L=1 and R=0 means the value of RS is not used, so even on ISA v3.00 there is no actual bug. We should still fix it, as setting a reserved bit on v2.06 is naughty, and we are only avoiding a bug on v3.00 by accident rather than design. Use ASM_FTR_IFSET() to generate the single argument form on ISA v2.06 and later, and the two argument form on pre v2.06. Although there may be very old toolchains which don't understand tlbiel, we have other code in the tree which has been using tlbiel for over five years, and no one has reported any build failures, so just let the assembler generate the instructions. 
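As a side note for readers decoding the hex constants: the lines removed in the hunk below open-code the ISA v2.05 form as ".long 0x7c000224 | (RB << 11) | (L << 21)". A minimal sketch (illustration only, helper name invented; rb is the GPR number holding the VA, not the VA itself) of how that constant falls out of the v2.05 field layout tabulated above:

    /* Build the ISA v2.05 "tlbiel RB,L" instruction word. */
    static inline unsigned int tlbiel_v205_encode(unsigned int rb, unsigned int l)
    {
            return (31u << 26) |            /* primary opcode 31 (bits 31-26)  */
                   ((l & 0x1) << 21) |      /* L field (bit 21)                */
                   ((rb & 0x1f) << 11) |    /* RB field (bits 15-11)           */
                   (274u << 1);             /* extended opcode 274 (bits 10-1) */
    }

    /* tlbiel_v205_encode(0, 0) == 0x7c000224, matching the old hard-coded value. */

With ASM_FTR_IFSET() the assembler emits the right form instead: the single-operand encoding when CPU_FTR_ARCH_206 is set, and the two-operand "tlbiel RB,L" form on pre v2.06 CPUs.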
Signed-off-by: Balbir Singh [mpe: Rewrite change log, use IFSET instead of IFCLR] Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hash_native_64.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 83ddc0e171b0..9d9b3eff123e 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -123,8 +123,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) va |= ssize << 8; sllp = get_sllp_encoding(apsize); va |= sllp << 5; - asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" - : : "r"(va) : "memory"); + asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1) + : : "r" (va), "i" (CPU_FTR_ARCH_206) + : "memory"); break; default: /* We need 14 to 14 + i bits of va */ @@ -141,8 +142,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) */ va |= (vpn & 0xfe); va |= 1; /* L */ - asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" - : : "r"(va) : "memory"); + asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1) + : : "r" (va), "i" (CPU_FTR_ARCH_206) + : "memory"); break; } -- cgit From 0e865a80c1358db9311c411c4763b9e2e324d0df Mon Sep 17 00:00:00 2001 From: Elimar Riesebieter Date: Sun, 18 Sep 2016 13:08:45 +0200 Subject: macintosh: Remove dependency on IDE_GD_ATA if ADB_PMU_LED_DISK is selected We can use the front led of powerbooks/ibooks to visualize disk activity without the deprecated IDE_GD_ATA. Signed-off-by: Elimar Riesebieter Signed-off-by: Michael Ellerman --- drivers/macintosh/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index d28690f6e262..5d80810934df 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -102,7 +102,6 @@ config ADB_PMU_LED_DISK bool "Use front LED as DISK LED by default" depends on ADB_PMU_LED depends on LEDS_CLASS - depends on IDE_GD_ATA select LEDS_TRIGGERS select LEDS_TRIGGER_DISK help -- cgit From 7a53ef5ebe80bc15f70d060dc4548434aa5e6cf6 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 3 Nov 2016 18:12:16 +1100 Subject: powerpc/configs: Drop REISERFS from pseries & powernv No one uses reiserfs much these days, or is likely to in future. So drop it from pseries and powernv defconfigs to save time and space. It's still enabled in ppc64_defconfig so we get some build coverage. 
Signed-off-by: Michael Ellerman --- arch/powerpc/configs/powernv_defconfig | 4 ---- arch/powerpc/configs/pseries_defconfig | 4 ---- 2 files changed, 8 deletions(-) diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index d98b6eb3254f..d77af0eca967 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -241,10 +241,6 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 8a3bc016b732..c8cee26fd9a0 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -244,10 +244,6 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y -- cgit From 7c65856b7e0d12f2be2adae7a75a11659285f5b9 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 31 Oct 2016 13:19:38 +1100 Subject: selftests/powerpc: Revert Load Monitor Register Tests Load monitored won't be supported in POWER9, so PPC_FEATURE2_ARCH_3_00 (in HWCAP2) will no longer imply Load monitor support. These Load monitored tests are enabled by PPC_FEATURE2_ARCH_3_00 so they are now bogus and need to be removed. This reverts commit 16c19a2e9833 ("selftests/powerpc: Load Monitor Register Tests"). Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/pmu/ebb/.gitignore | 2 - tools/testing/selftests/powerpc/pmu/ebb/Makefile | 2 +- tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c | 143 --------------------- tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h | 39 ------ .../selftests/powerpc/pmu/ebb/ebb_lmr_regs.c | 37 ------ tools/testing/selftests/powerpc/reg.h | 5 - 6 files changed, 1 insertion(+), 227 deletions(-) delete mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c delete mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h delete mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore index 44b7df14a936..42bddbed8b64 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore +++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore @@ -20,5 +20,3 @@ back_to_back_ebbs_test lost_exception_test no_handler_test cycles_with_mmcr2_test -ebb_lmr -ebb_lmr_regs \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index 6b0453e60d53..8d2279c4bb4b 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \ fork_cleanup_test ebb_on_child_test \ ebb_on_willing_child_test back_to_back_ebbs_test \ lost_exception_test no_handler_test \ - cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs + cycles_with_mmcr2_test all: $(TEST_PROGS) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c deleted file mode 100644 index 
c47ebd55ba4d..000000000000 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2016, Jack Miller, IBM Corp. - * Licensed under GPLv2. - */ - -#include -#include - -#include "ebb.h" -#include "ebb_lmr.h" - -#define SIZE (32 * 1024 * 1024) /* 32M */ -#define LM_SIZE 0 /* Smallest encoding, 32M */ - -#define SECTIONS 64 /* 1 per bit in LMSER */ -#define SECTION_SIZE (SIZE / SECTIONS) -#define SECTION_LONGS (SECTION_SIZE / sizeof(long)) - -static unsigned long *test_mem; - -static int lmr_count = 0; - -void ebb_lmr_handler(void) -{ - lmr_count++; -} - -void ldmx_full_section(unsigned long *mem, int section) -{ - unsigned long *ptr; - int i; - - for (i = 0; i < SECTION_LONGS; i++) { - ptr = &mem[(SECTION_LONGS * section) + i]; - ldmx((unsigned long) &ptr); - ebb_lmr_reset(); - } -} - -unsigned long section_masks[] = { - 0x8000000000000000, - 0xFF00000000000000, - 0x0000000F70000000, - 0x8000000000000001, - 0xF0F0F0F0F0F0F0F0, - 0x0F0F0F0F0F0F0F0F, - 0x0 -}; - -int ebb_lmr_section_test(unsigned long *mem) -{ - unsigned long *mask = section_masks; - int i; - - for (; *mask; mask++) { - mtspr(SPRN_LMSER, *mask); - printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER)); - - for (i = 0; i < 64; i++) { - lmr_count = 0; - ldmx_full_section(mem, i); - if (*mask & (1UL << (63 - i))) - FAIL_IF(lmr_count != SECTION_LONGS); - else - FAIL_IF(lmr_count); - } - } - - return 0; -} - -int ebb_lmr(void) -{ - int i; - - SKIP_IF(!lmr_is_supported()); - - setup_ebb_handler(ebb_lmr_handler); - - ebb_global_enable(); - - FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0); - - mtspr(SPRN_LMSER, 0); - - FAIL_IF(mfspr(SPRN_LMSER) != 0); - - mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE)); - - FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE)); - - /* Read every single byte to ensure we get no false positives */ - for (i = 0; i < SECTIONS; i++) - ldmx_full_section(test_mem, i); - - FAIL_IF(lmr_count != 0); - - /* Turn on the first section */ - - mtspr(SPRN_LMSER, (1UL << 63)); - FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63)); - - /* Enable LM (BESCR) */ - - mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME); - FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME)); - - ldmx((unsigned long)&test_mem); - - FAIL_IF(lmr_count != 1); // exactly one exception - FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled - FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set - - printf("Simple LMR EBB OK\n"); - - /* This shouldn't cause an EBB since it's been disabled */ - ldmx((unsigned long)&test_mem); - FAIL_IF(lmr_count != 1); - - printf("LMR disable on EBB OK\n"); - - ebb_lmr_reset(); - - /* This should cause an EBB or reset is broken */ - ldmx((unsigned long)&test_mem); - FAIL_IF(lmr_count != 2); - - printf("LMR reset EBB OK\n"); - - ebb_lmr_reset(); - - return ebb_lmr_section_test(test_mem); -} - -int main(void) -{ - int ret = test_harness(ebb_lmr, "ebb_lmr"); - - if (test_mem) - free(test_mem); - - return ret; -} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h deleted file mode 100644 index ef50abd557cd..000000000000 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H -#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H - -#include "reg.h" - -#ifndef PPC_FEATURE2_ARCH_3_00 -#define PPC_FEATURE2_ARCH_3_00 0x00800000 -#endif - -#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00) - -static inline void 
ebb_lmr_reset(void) -{ - unsigned long bescr = mfspr(SPRN_BESCR); - bescr &= ~(BESCR_LMEO); - bescr |= BESCR_LME; - mtspr(SPRN_BESCR, bescr); -} - -#define LDMX(t, a, b)\ - (0x7c00026a | \ - (((t) & 0x1f) << 21) | \ - (((a) & 0x1f) << 16) | \ - (((b) & 0x1f) << 11)) - -static inline unsigned long ldmx(unsigned long address) -{ - unsigned long ret; - - asm volatile ("mr 9, %1\r\n" - ".long " __stringify(LDMX(9, 0, 9)) "\r\n" - "mr %0, 9\r\n":"=r"(ret) - :"r"(address) - :"r9"); - - return ret; -} - -#endif diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c deleted file mode 100644 index aff4241fd88a..000000000000 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2016, Jack Miller, IBM Corp. - * Licensed under GPLv2. - */ - -#include -#include -#include - -#include "ebb.h" -#include "ebb_lmr.h" - -#define CHECKS 10000 - -int ebb_lmr_regs(void) -{ - int i; - - SKIP_IF(!lmr_is_supported()); - - ebb_global_enable(); - - for (i = 0; i < CHECKS; i++) { - mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits - mtspr(SPRN_LMSER, i); - - FAIL_IF(mfspr(SPRN_LMRR) != (i << 25)); - FAIL_IF(mfspr(SPRN_LMSER) != i); - } - - return 0; -} - -int main(void) -{ - return test_harness(ebb_lmr_regs, "ebb_lmr_regs"); -} diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h index fddf368ed82f..65bfdeeebdee 100644 --- a/tools/testing/selftests/powerpc/reg.h +++ b/tools/testing/selftests/powerpc/reg.h @@ -34,11 +34,6 @@ #define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */ #define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */ -#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */ -#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */ - -#define SPRN_LMRR 813 /* Load Monitor Region Register */ -#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */ #define SPRN_PMC1 771 #define SPRN_PMC2 772 -- cgit From 29a969b764817c1dce819c2bc8c00a147529a5ef Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Mon, 31 Oct 2016 13:19:39 +1100 Subject: powerpc: Revert Load Monitor Register Support Load monitored is no longer supported on POWER9 so let's remove the code. This reverts commit bd3ea317fddf ("powerpc: Load Monitor Register Support"). 
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/processor.h | 2 -- arch/powerpc/include/asm/reg.h | 5 ----- arch/powerpc/kernel/process.c | 18 ------------------ arch/powerpc/kernel/traps.c | 9 --------- 4 files changed, 34 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index c07c31b0e89e..5c0a665af1c3 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -312,8 +312,6 @@ struct thread_struct { unsigned long mmcr2; unsigned mmcr0; unsigned used_ebb; - unsigned long lmrr; - unsigned long lmser; #endif }; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 9cd4e8cbc78c..c491cfebfc05 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -292,8 +292,6 @@ #define SPRN_HRMOR 0x139 /* Real mode offset register */ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ -#define SPRN_LMRR 0x32D /* Load Monitor Region Register */ -#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */ #define SPRN_IC 0x350 /* Virtual Instruction Count */ #define SPRN_VTB 0x351 /* Virtual Time Base */ #define SPRN_LDBAR 0x352 /* LD Base Address Register */ @@ -304,7 +302,6 @@ #define SPRN_PMCR 0x374 /* Power Management Control Register */ /* HFSCR and FSCR bit numbers are the same */ -#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */ #define FSCR_TAR_LG 8 /* Enable Target Address Register */ #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ #define FSCR_TM_LG 5 /* Enable Transactional Memory */ @@ -314,12 +311,10 @@ #define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */ #define FSCR_FP_LG 0 /* Enable Floating Point */ #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ -#define FSCR_LM __MASK(FSCR_LM_LG) #define FSCR_TAR __MASK(FSCR_TAR_LG) #define FSCR_EBB __MASK(FSCR_EBB_LG) #define FSCR_DSCR __MASK(FSCR_DSCR_LG) #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ -#define HFSCR_LM __MASK(FSCR_LM_LG) #define HFSCR_TAR __MASK(FSCR_TAR_LG) #define HFSCR_EBB __MASK(FSCR_EBB_LG) #define HFSCR_TM __MASK(FSCR_TM_LG) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ce6dc61b15b2..5b62e8c36210 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1051,14 +1051,6 @@ static inline void save_sprs(struct thread_struct *t) */ t->tar = mfspr(SPRN_TAR); } - - if (cpu_has_feature(CPU_FTR_ARCH_300)) { - /* Conditionally save Load Monitor registers, if enabled */ - if (t->fscr & FSCR_LM) { - t->lmrr = mfspr(SPRN_LMRR); - t->lmser = mfspr(SPRN_LMSER); - } - } #endif } @@ -1094,16 +1086,6 @@ static inline void restore_sprs(struct thread_struct *old_thread, if (old_thread->tar != new_thread->tar) mtspr(SPRN_TAR, new_thread->tar); } - - if (cpu_has_feature(CPU_FTR_ARCH_300)) { - /* Conditionally restore Load Monitor registers, if enabled */ - if (new_thread->fscr & FSCR_LM) { - if (old_thread->lmrr != new_thread->lmrr) - mtspr(SPRN_LMRR, new_thread->lmrr); - if (old_thread->lmser != new_thread->lmser) - mtspr(SPRN_LMSER, new_thread->lmser); - } - } #endif } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 32c468b8b548..91d278c9ab28 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1430,7 +1430,6 @@ void facility_unavailable_exception(struct pt_regs *regs) [FSCR_TM_LG] = "TM", [FSCR_EBB_LG] = "EBB", [FSCR_TAR_LG] = 
"TAR", - [FSCR_LM_LG] = "LM", }; char *facility = "unknown"; u64 value; @@ -1488,14 +1487,6 @@ void facility_unavailable_exception(struct pt_regs *regs) emulate_single_step(regs); } return; - } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) { - /* - * This process has touched LM, so turn it on forever - * for this process - */ - current->thread.fscr |= FSCR_LM; - mtspr(SPRN_FSCR, current->thread.fscr); - return; } if (status == FSCR_TM_LG) { -- cgit From 9e4f51bdaf880208869aa001ee94a49de4b28d27 Mon Sep 17 00:00:00 2001 From: Jack Miller Date: Wed, 10 Aug 2016 19:32:40 -0500 Subject: powerpc/powernv: Simplify searching for compatible device nodes This condenses the opal node searching into a single function that finds all compatible nodes, instead of just searching the ibm,opal children, for ipmi, flash, and prd similar to how opal-i2c nodes are found. Signed-off-by: Jack Miller Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/opal.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 6c9a65b52e63..893d8ea995aa 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -632,21 +632,11 @@ static void __init opal_dump_region_init(void) "rc = %d\n", rc); } -static void opal_pdev_init(struct device_node *opal_node, - const char *compatible) +static void opal_pdev_init(const char *compatible) { struct device_node *np; - for_each_child_of_node(opal_node, np) - if (of_device_is_compatible(np, compatible)) - of_platform_device_create(np, NULL, NULL); -} - -static void opal_i2c_create_devs(void) -{ - struct device_node *np; - - for_each_compatible_node(np, NULL, "ibm,opal-i2c") + for_each_compatible_node(np, NULL, compatible) of_platform_device_create(np, NULL, NULL); } @@ -718,7 +708,7 @@ static int __init opal_init(void) opal_hmi_handler_init(); /* Create i2c platform devices */ - opal_i2c_create_devs(); + opal_pdev_init("ibm,opal-i2c"); /* Setup a heatbeat thread if requested by OPAL */ opal_init_heartbeat(); @@ -753,12 +743,12 @@ static int __init opal_init(void) } /* Initialize platform devices: IPMI backend, PRD & flash interface */ - opal_pdev_init(opal_node, "ibm,opal-ipmi"); - opal_pdev_init(opal_node, "ibm,opal-flash"); - opal_pdev_init(opal_node, "ibm,opal-prd"); + opal_pdev_init("ibm,opal-ipmi"); + opal_pdev_init("ibm,opal-flash"); + opal_pdev_init("ibm,opal-prd"); /* Initialise platform device: oppanel interface */ - opal_pdev_init(opal_node, "ibm,opal-oppanel"); + opal_pdev_init("ibm,opal-oppanel"); /* Initialise OPAL kmsg dumper for flushing console on panic */ opal_kmsg_init(); -- cgit From fe0f3168169f7c34c29b0cf0c489f126a7f29643 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Nov 2016 16:26:00 +0100 Subject: powerpc/ibmebus: Fix device reference leaks in sysfs interface Make sure to drop any reference taken by bus_find_device() in the sysfs callbacks that are used to create and destroy devices based on device-tree entries. 
Fixes: 6bccf755ff53 ("[POWERPC] ibmebus: dynamic addition/removal of adapters, some code cleanup") Signed-off-by: Johan Hovold Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ibmebus.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 6ca9a2ffaac7..c7d3ff7e101c 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -262,6 +262,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, const char *buf, size_t count) { struct device_node *dn = NULL; + struct device *dev; char *path; ssize_t rc = 0; @@ -269,8 +270,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, if (!path) return -ENOMEM; - if (bus_find_device(&ibmebus_bus_type, NULL, path, - ibmebus_match_path)) { + dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path); + if (dev) { + put_device(dev); printk(KERN_WARNING "%s: %s has already been probed\n", __func__, path); rc = -EEXIST; @@ -307,6 +310,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, ibmebus_match_path))) { of_device_unregister(to_platform_device(dev)); + put_device(dev); kfree(path); return count; -- cgit From 815a7141c4d1b11610dccb7fcbb38633759824f2 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Nov 2016 16:26:01 +0100 Subject: powerpc/ibmebus: Fix further device reference leaks Make sure to drop any reference taken by bus_find_device() when creating devices during init and driver registration. Fixes: 55347cc9962f ("[POWERPC] ibmebus: Add device creation and bus probing based on of_device") Signed-off-by: Johan Hovold Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ibmebus.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index c7d3ff7e101c..35f5244782d9 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn) static int ibmebus_create_devices(const struct of_device_id *matches) { struct device_node *root, *child; + struct device *dev; int ret = 0; root = of_find_node_by_path("/"); @@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches) if (!of_match_node(matches, child)) continue; - if (bus_find_device(&ibmebus_bus_type, NULL, child, - ibmebus_match_node)) + dev = bus_find_device(&ibmebus_bus_type, NULL, child, + ibmebus_match_node); + if (dev) { + put_device(dev); continue; + } ret = ibmebus_create_device(child); if (ret) { -- cgit From e8cfb7e7c3b2be8a4c2241b5da2ae62bc1f4808f Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Nov 2016 16:26:02 +0100 Subject: powerpc/vio: Clarify vio_find_node() reference counting Add comment clarifying that vio_find_node() takes a reference to the embedded struct device which needs to be dropped after use. 
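The same rule underlies this and the surrounding ibmebus/rpadlpar fixes: lookup helpers such as bus_find_device() and vio_find_node() return their result with a reference held, so even a caller that only wants an existence check must hand that reference back. A rough sketch of the pattern (illustration only; the helper below is made up, not kernel code):

    #include <linux/device.h>

    /* Return true if a matching device is already registered on @bus. */
    static bool example_device_exists(struct bus_type *bus, void *data,
                                      int (*match)(struct device *, void *))
    {
            struct device *dev;

            dev = bus_find_device(bus, NULL, data, match);
            if (!dev)
                    return false;

            put_device(dev);        /* drop the reference the lookup took */
            return true;
    }

For vio devices the reference lives in the embedded struct device, so the matching release is put_device(&vio_dev->dev), as the rpadlpar fix below does.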
Signed-off-by: Johan Hovold Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/vio.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index b3813ddb2fb4..2c8fb3ec989e 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1648,6 +1648,9 @@ static struct vio_dev *vio_find_name(const char *name) /** * vio_find_node - find an already-registered vio_dev * @vnode: device_node of the virtual device we're looking for + * + * Takes a reference to the embedded struct device which needs to be dropped + * after use. */ struct vio_dev *vio_find_node(struct device_node *vnode) { -- cgit From 99e5cde5eae78bef95bfe7c16ccda87fb070149b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Nov 2016 16:26:03 +0100 Subject: powerpc/pci/rpadlpar: Fix device reference leaks Make sure to drop any device reference taken by vio_find_node() when adding and removing virtual I/O slots. Fixes: 5eeb8c63a38f ("[PATCH] PCI Hotplug: rpaphp: Move VIO registration") Signed-off-by: Johan Hovold Signed-off-by: Michael Ellerman --- drivers/pci/hotplug/rpadlpar_core.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index dc67f39779ec..c614ff7c3bc3 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) { - if (vio_find_node(dn)) + struct vio_dev *vio_dev; + + vio_dev = vio_find_node(dn); + if (vio_dev) { + put_device(&vio_dev->dev); return -EINVAL; + } if (!vio_register_device_node(dn)) { printk(KERN_ERR @@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) return -EINVAL; vio_unregister_device(vio_dev); + + put_device(&vio_dev->dev); + return 0; } -- cgit From 0e27d27e0d6d92342797e0d37738c276c1ff090e Mon Sep 17 00:00:00 2001 From: Peter Senna Tschudin Date: Wed, 9 Nov 2016 09:55:04 +0100 Subject: selftests/powerpc: Return false instead of -1 in require_paranoia_below() Returning a negative value for a boolean function seems to have the undesired effect of returning true. require_paranoia_below() is a boolean function, but the variable used to store the return value is an integer, receiving -1 or 0. This patch converts rc to bool, replaces -1 by false, and 0 by true. mpe: This wasn't exhibiting in practice because the common case, where we do the comparison of the desired level vs the current value, was being compiled into a computation based on the result of the comparison, ie. it wasn't using the default -1 value at all. However that was just luck and the code is still wrong.
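For anyone unfamiliar with the C conversion rule behind this: assigning or returning any non-zero value through a bool yields true, so a -1 "error" return is indistinguishable from success. A tiny stand-alone illustration (ordinary userspace C, not taken from the selftest):

    #include <stdbool.h>
    #include <stdio.h>

    static bool broken_check(void)
    {
            return -1;      /* converted to _Bool: becomes 1, i.e. true */
    }

    int main(void)
    {
            if (broken_check())
                    printf("-1 came back as true\n");       /* this prints */

            return 0;
    }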
Signed-off-by: Peter Senna Tschudin Signed-off-by: Andrew Shadura Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/pmu/lib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c index 8b992fa5b478..5bf5dd40822b 100644 --- a/tools/testing/selftests/powerpc/pmu/lib.c +++ b/tools/testing/selftests/powerpc/pmu/lib.c @@ -193,9 +193,9 @@ bool require_paranoia_below(int level) long current; char *end, buf[16]; FILE *f; - int rc; + bool rc; - rc = -1; + rc = false; f = fopen(PARANOID_PATH, "r"); if (!f) { @@ -218,7 +218,7 @@ bool require_paranoia_below(int level) if (current >= level) goto out_close; - rc = 0; + rc = true; out_close: fclose(f); out: -- cgit From 67f6d66559930eddea06c6bfdaf0e53b568766ed Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:18 +0100 Subject: powerpc: convert amigaone_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts amigaone_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/amigaone_defconfig | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig index 8b83ce8a01e7..8d3e3c41258d 100644 --- a/arch/powerpc/configs/amigaone_defconfig +++ b/arch/powerpc/configs/amigaone_defconfig @@ -45,12 +45,6 @@ CONFIG_PARPORT_PC_FIFO=y CONFIG_BLK_DEV_FD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -# CONFIG_IDEPCI_PCIBUS_ORDER is not set -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_SIIMAGE=y -CONFIG_BLK_DEV_VIA82CXXX=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -61,6 +55,10 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 # CONFIG_SCSI_SYM53C8XX_MMIO is not set +CONFIG_ATA=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_VIA=y +CONFIG_ATA_GENERIC=y CONFIG_NETDEVICES=y CONFIG_VORTEX=y CONFIG_8139CP=y -- cgit From 8020c1220dd4ff324698668f067746dd08e050c9 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:19 +0100 Subject: powerpc: convert cell_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts cell_defconfig to use libata PATA drivers. 
Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/cell_defconfig | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 7b6f30dece34..2d7fcbe047ac 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -108,16 +108,15 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AEC62XX=y -CONFIG_BLK_DEV_SIIMAGE=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_ATA=y CONFIG_SATA_PROMISE=y +CONFIG_PATA_ARTOP=y CONFIG_PATA_PDC2027X=m +CONFIG_PATA_SIL680=y +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m -- cgit From f22cc4d7bb268ffc585a5f8593138b3f61a92f6c Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:20 +0100 Subject: powerpc: convert chrp32_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts chrp32_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/chrp32_defconfig | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index ac9a50da2dc6..1f6f90cd8aff 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -42,12 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_FD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_SL82C105=y -CONFIG_BLK_DEV_VIA82CXXX=y -CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -56,6 +50,10 @@ CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 +CONFIG_ATA=y +CONFIG_PATA_VIA=y +CONFIG_PATA_WINBOND=y +CONFIG_ATA_GENERIC=y CONFIG_NETDEVICES=y CONFIG_PCNET32=y CONFIG_NET_TULIP=y -- cgit From b10fd396376c9b1b1f5b0a3980b487ea2965ab73 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:21 +0100 Subject: powerpc: convert g5_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts g5_defconfig to use libata PATA drivers. 
Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/g5_defconfig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 3b2511c090d8..e18f2e06553f 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -60,10 +60,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_IDE_PMAC=y -CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -73,6 +69,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_ATA=y CONFIG_SATA_SVW=y +CONFIG_PATA_MACIO=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y -- cgit From c1f9d2938bd75c0245f640ad30d81e0564f0c0ce Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:22 +0100 Subject: powerpc: convert maple_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts maple_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/maple_defconfig | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 27abfab31219..c4018179e219 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -39,16 +39,15 @@ CONFIG_IP_PNP_DHCP=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_IDE_TASK_IOCTL=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AMD74XX=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_IPR=y CONFIG_ATA=y +CONFIG_PATA_AMD=y +CONFIG_ATA_GENERIC=y CONFIG_NETDEVICES=y CONFIG_AMD8111_ETH=y CONFIG_TIGON3=y -- cgit From 23bf36cd120f97e85ce089c841197f28fc5e1c12 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:23 +0100 Subject: powerpc: disable IDE subsystem in pasemi_defconfig This patch disables deprecated IDE subsystem in pasemi_defconfig (no IDE host drivers are selected in this config so there is no valid reason to enable IDE subsystem itself). 
Cc: Olof Johansson Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/pasemi_defconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 76f4edd441d3..5553c5ce4274 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -58,9 +58,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_EEPROM_LEGACY=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_IDE_TASK_IOCTL=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_OSST=y -- cgit From b0d3a074174138b7714533968be723bee87e239e Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:24 +0100 Subject: powerpc: convert pmac32_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts pmac32_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/pmac32_defconfig | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index e5a674d4a716..fc1e7a7388b8 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -117,15 +117,6 @@ CONFIG_CONNECTOR=y CONFIG_MAC_FLOPPY=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECS=m -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_PDC202XX_NEW=y -CONFIG_BLK_DEV_SL82C105=y -CONFIG_BLK_DEV_IDE_PMAC=y -CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y -CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -140,6 +131,12 @@ CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 CONFIG_SCSI_MESH=y CONFIG_SCSI_MAC53C94=y +CONFIG_ATA=y +CONFIG_PATA_MACIO=y +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_WINBOND=y +CONFIG_PATA_PCMCIA=m +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m -- cgit From 7ddf3db3d775d09da87f38ab39f26184dfae328a Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:25 +0100 Subject: powerpc: convert ppc64_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts ppc64_defconfig to use libata PATA drivers. 
Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/ppc64_defconfig | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 58a98d40086f..df5f1616375f 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -85,12 +85,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_VIRTIO_BLK=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AMD74XX=y -CONFIG_BLK_DEV_IDE_PMAC=y -CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y @@ -120,6 +114,9 @@ CONFIG_SATA_AHCI=y CONFIG_SATA_SIL24=y CONFIG_SATA_MV=y CONFIG_SATA_SVW=y +CONFIG_PATA_AMD=y +CONFIG_PATA_MACIO=y +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y -- cgit From 4a0b4bfe0101901126dc4dba0ba90498dbb95a22 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:26 +0100 Subject: powerpc: convert ppc64e_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts ppc64e_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/ppc64e_defconfig | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index fd2edd650c20..11a3473f9e2e 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -59,10 +59,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AMD74XX=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -79,6 +75,8 @@ CONFIG_SCSI_DEBUG=m CONFIG_ATA=y CONFIG_SATA_SIL24=y CONFIG_SATA_SVW=y +CONFIG_PATA_AMD=y +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y -- cgit From 7f10fe36369068341aff05c067a786807dfd4ff9 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:27 +0100 Subject: powerpc: convert ppc6xx_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts ppc6xx_defconfig to use libata PATA drivers. 
Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/ppc6xx_defconfig | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 8fbf49801233..3ce91a3df27f 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -378,13 +378,6 @@ CONFIG_EEPROM_AT24=m CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=m -CONFIG_IDE_TASK_IOCTL=y -# CONFIG_IDEPCI_PCIBUS_ORDER is not set -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_IDE_PMAC=y -CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m @@ -411,13 +404,14 @@ CONFIG_ATA=y CONFIG_SATA_FSL=m CONFIG_PDC_ADMA=m CONFIG_ATA_PIIX=m +CONFIG_PATA_MACIO=y CONFIG_PATA_MPC52xx=m CONFIG_PATA_OPTIDMA=m CONFIG_PATA_SCH=m CONFIG_PATA_VIA=m CONFIG_PATA_PLATFORM=m CONFIG_PATA_OF_PLATFORM=m -CONFIG_ATA_GENERIC=m +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m -- cgit From 80269fe4203432168dae6e49770d64bf7c7b8d5f Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:29 +0100 Subject: powerpc: convert pseries_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts pseries_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/pseries_defconfig | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c8cee26fd9a0..536e81110936 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -92,10 +92,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_VIRTIO_BLK=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AMD74XX=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y @@ -122,7 +118,8 @@ CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y CONFIG_SATA_AHCI=y -# CONFIG_ATA_SFF is not set +CONFIG_PATA_AMD=y +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y -- cgit From 1aed61f9c9664cc7331079a66aba99b5c5a38cea Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 3 Feb 2016 16:50:30 +0100 Subject: powerpc: convert storcenter_defconfig to use libata PATA drivers IDE subsystem has been deprecated since 2009 and the majority (if not all) of Linux distributions have switched to use libata for ATA support exclusively. However there are still some users (mostly old or/and embedded non-x86 systems) that have not converted from using IDE subsystem to libata PATA drivers. 
This doesn't seem to be good thing in the long-term for Linux as while there is less and less PATA systems left in use: * testing efforts are divided between two subsystems * having duplicate drivers for same hardware confuses users This patch converts storcenter_defconfig to use libata PATA drivers. Signed-off-by: Bartlomiej Zolnierkiewicz --- arch/powerpc/configs/storcenter_defconfig | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index e9122b15e5fd..74bca2eccd0f 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig @@ -36,12 +36,11 @@ CONFIG_NFTL_RW=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP=y -CONFIG_IDE=y -CONFIG_BLK_DEV_VIA82CXXX=y -CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_SCSI_SPI_ATTRS=y +CONFIG_ATA=y +CONFIG_PATA_VIA=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=y -- cgit From ac8d3818aab7e08332a79e4252118297cb28e995 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Sat, 5 Nov 2016 15:24:22 +1100 Subject: powerpc/mm: Fix typo in radix encodings print Rename "sift" to "shift". Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index ed7bddc456b7..525c36ebbc8d 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -248,7 +248,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node, /* top 3 bit is AP encoding */ shift = be32_to_cpu(prop[0]) & ~(0xe << 28); ap = be32_to_cpu(prop[0]) >> 29; - pr_info("Page size sift = %d AP=0x%x\n", shift, ap); + pr_info("Page size shift = %d AP=0x%x\n", shift, ap); idx = get_idx_from_shift(shift); if (idx < 0) -- cgit From 8f272a5dd6826f14e47110eccd37b6782bf74901 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 14 Nov 2016 16:28:10 +1100 Subject: powerpc/pseries: Move CMO code from plapr_wrappers.h to platforms/pseries Currently there's some CMO (Cooperative Memory Overcommit) code, in plpar_wrappers.h. Some of it is #ifdef CONFIG_PSERIES and some of it isn't. The end result being if a file includes plpar_wrappers.h it won't build with CONFIG_PSERIES=n. Fix it by moving the CMO code into platforms/pseries. The two hcall wrappers can just be moved into their only caller, cmm.c, and the accessors can go in pseries.h. Note we need the accessors because cmm.c can be built as a module, so there needs to be a split between the built-in code vs the module, and that's achieved by using those accessors. 
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 21 ------------------- arch/powerpc/include/asm/plpar_wrappers.h | 32 ----------------------------- arch/powerpc/platforms/pseries/cmm.c | 34 +++++++++++++++++++++++++++++++ arch/powerpc/platforms/pseries/lparcfg.c | 1 + arch/powerpc/platforms/pseries/pseries.h | 19 +++++++++++++++++ 5 files changed, 54 insertions(+), 53 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 489748ed21b4..8d5f8352afd7 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -413,27 +413,6 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc) } } -#ifdef CONFIG_PPC_PSERIES -extern int CMO_PrPSP; -extern int CMO_SecPSP; -extern unsigned long CMO_PageSize; - -static inline int cmo_get_primary_psp(void) -{ - return CMO_PrPSP; -} - -static inline int cmo_get_secondary_psp(void) -{ - return CMO_SecPSP; -} - -static inline unsigned long cmo_get_page_size(void) -{ - return CMO_PageSize; -} -#endif /* CONFIG_PPC_PSERIES */ - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 1b394247afc2..034a588b122c 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -93,38 +93,6 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa) return vpa_call(H_VPA_REG_DTL, cpu, vpa); } -static inline long plpar_page_set_loaned(unsigned long vpa) -{ - unsigned long cmo_page_sz = cmo_get_page_size(); - long rc = 0; - int i; - - for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz) - rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0); - - for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz) - plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, - vpa + i - cmo_page_sz, 0); - - return rc; -} - -static inline long plpar_page_set_active(unsigned long vpa) -{ - unsigned long cmo_page_sz = cmo_get_page_size(); - long rc = 0; - int i; - - for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz) - rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0); - - for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz) - plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, - vpa + i - cmo_page_sz, 0); - - return rc; -} - extern void vpa_init(int cpu); static inline long plpar_pte_enter(unsigned long flags, diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 66e7227469b8..972328829387 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -41,6 +41,8 @@ #include #include +#include "pseries.h" + #define CMM_DRIVER_VERSION "1.0.0" #define CMM_DEFAULT_DELAY 1 #define CMM_HOTPLUG_DELAY 5 @@ -109,6 +111,38 @@ static int hotplug_occurred; /* protected by the hotplug mutex */ static struct task_struct *cmm_thread_ptr; +static long plpar_page_set_loaned(unsigned long vpa) +{ + unsigned long cmo_page_sz = cmo_get_page_size(); + long rc = 0; + int i; + + for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz) + rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0); + + for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz) + plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, + vpa + i - cmo_page_sz, 0); + + return rc; +} + +static long plpar_page_set_active(unsigned long vpa) +{ + unsigned long cmo_page_sz = cmo_get_page_size(); + long rc = 0; + int i; + + for (i = 0; !rc && i < 
PAGE_SIZE; i += cmo_page_sz) + rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0); + + for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz) + plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, + vpa + i - cmo_page_sz, 0); + + return rc; +} + /** * cmm_alloc_pages - Allocate pages and mark them as loaned * @nr: number of pages to allocate diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index afa05a2cb702..e6397976060e 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -37,6 +37,7 @@ #include #include +#include "pseries.h" /* * This isn't a module but we expose that to userspace diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index b1be7b713fe6..1361a9db534b 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -79,4 +79,23 @@ extern struct pci_controller_ops pseries_pci_controller_ops; unsigned long pseries_memory_block_size(void); +extern int CMO_PrPSP; +extern int CMO_SecPSP; +extern unsigned long CMO_PageSize; + +static inline int cmo_get_primary_psp(void) +{ + return CMO_PrPSP; +} + +static inline int cmo_get_secondary_psp(void) +{ + return CMO_SecPSP; +} + +static inline unsigned long cmo_get_page_size(void) +{ + return CMO_PageSize; +} + #endif /* _PSERIES_PSERIES_H */ -- cgit From 8eb07b187000d48152c4ef97f075bdfd82575e8f Mon Sep 17 00:00:00 2001 From: Rashmica Gupta Date: Fri, 27 May 2016 15:48:59 +1000 Subject: powerpc/mm: Dump linux pagetables Useful to be able to dump the kernels page tables to check permissions and memory types - derived from arm64's implementation. Add a debugfs file to check the page tables. To use this the PPC_PTDUMP config option must be selected. Signed-off-by: Rashmica Gupta Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig.debug | 12 + arch/powerpc/mm/Makefile | 1 + arch/powerpc/mm/dump_linuxpagetables.c | 440 +++++++++++++++++++++++++++++++++ 3 files changed, 453 insertions(+) create mode 100644 arch/powerpc/mm/dump_linuxpagetables.c diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 63292f64b25a..20cf770611ec 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -354,4 +354,16 @@ config FAIL_IOMMU If you are unsure, say N. +config PPC_PTDUMP + bool "Export kernel pagetable layout to userspace via debugfs" + depends on DEBUG_KERNEL + select DEBUG_FS + help + This option exports the state of the kernel pagetables to a + debugfs file. This is only useful for kernel developers who are + working in architecture specific areas of the kernel - probably + not a good idea to enable this feature in a production kernel. + + If you are unsure, say N. + endmenu diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 1a4e570f7894..5b3e4d560315 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -42,3 +42,4 @@ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o +obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c new file mode 100644 index 000000000000..d242bc79ae64 --- /dev/null +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -0,0 +1,440 @@ +/* + * Copyright 2016, Rashmica Gupta, IBM Corp. 
+ * + * This traverses the kernel pagetables and dumps the + * information about the used sections of memory to + * /sys/kernel/debug/kernel_pagetables. + * + * Derived from the arm64 implementation: + * Copyright (c) 2014, The Linux Foundation, Laura Abbott. + * (C) Copyright 2008 Intel Corporation, Arjan van de Ven. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * To visualise what is happening, + * + * - PTRS_PER_P** = how many entries there are in the corresponding P** + * - P**_SHIFT = how many bits of the address we use to index into the + * corresponding P** + * - P**_SIZE is how much memory we can access through the table - not the + * size of the table itself. + * P**={PGD, PUD, PMD, PTE} + * + * + * Each entry of the PGD points to a PUD. Each entry of a PUD points to a + * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to + * a page. + * + * In the case where there are only 3 levels, the PUD is folded into the + * PGD: every PUD has only one entry which points to the PMD. + * + * The page dumper groups page table entries of the same type into a single + * description. It uses pg_state to track the range information while + * iterating over the PTE entries. When the continuity is broken it then + * dumps out a description of the range - ie PTEs that are virtually contiguous + * with the same PTE flags are chunked together. This is to make it clear how + * different areas of the kernel virtual memory are used. + * + */ +struct pg_state { + struct seq_file *seq; + const struct addr_marker *marker; + unsigned long start_address; + unsigned int level; + u64 current_flags; +}; + +struct addr_marker { + unsigned long start_address; + const char *name; +}; + +static struct addr_marker address_markers[] = { + { 0, "Start of kernel VM" }, + { 0, "vmalloc() Area" }, + { 0, "vmalloc() End" }, + { 0, "isa I/O start" }, + { 0, "isa I/O end" }, + { 0, "phb I/O start" }, + { 0, "phb I/O end" }, + { 0, "I/O remap start" }, + { 0, "I/O remap end" }, + { 0, "vmemmap start" }, + { -1, NULL }, +}; + +struct flag_info { + u64 mask; + u64 val; + const char *set; + const char *clear; + bool is_val; + int shift; +}; + +static const struct flag_info flag_array[] = { + { +#ifdef CONFIG_PPC_STD_MMU_64 + .mask = _PAGE_PRIVILEGED, + .val = 0, +#else + .mask = _PAGE_USER, + .val = _PAGE_USER, +#endif + .set = "user", + .clear = " ", + }, { + .mask = _PAGE_RW, + .val = _PAGE_RW, + .set = "rw", + .clear = "ro", + }, { + .mask = _PAGE_EXEC, + .val = _PAGE_EXEC, + .set = " X ", + .clear = " ", + }, { + .mask = _PAGE_PTE, + .val = _PAGE_PTE, + .set = "pte", + .clear = " ", + }, { + .mask = _PAGE_PRESENT, + .val = _PAGE_PRESENT, + .set = "present", + .clear = " ", + }, { +#ifdef CONFIG_PPC_STD_MMU_64 + .mask = H_PAGE_HASHPTE, + .val = H_PAGE_HASHPTE, +#else + .mask = _PAGE_HASHPTE, + .val = _PAGE_HASHPTE, +#endif + .set = "hpte", + .clear = " ", + }, { +#ifndef CONFIG_PPC_STD_MMU_64 + .mask = _PAGE_GUARDED, + .val = _PAGE_GUARDED, + .set = "guarded", + .clear = " ", + }, { +#endif + .mask = _PAGE_DIRTY, + .val = _PAGE_DIRTY, + .set = "dirty", + .clear = " ", + }, { + .mask = _PAGE_ACCESSED, + .val = _PAGE_ACCESSED, + .set = "accessed", + .clear = " ", + }, { +#ifndef CONFIG_PPC_STD_MMU_64 + .mask = 
_PAGE_WRITETHRU, + .val = _PAGE_WRITETHRU, + .set = "write through", + .clear = " ", + }, { +#endif + .mask = _PAGE_NO_CACHE, + .val = _PAGE_NO_CACHE, + .set = "no cache", + .clear = " ", + }, { + .mask = H_PAGE_BUSY, + .val = H_PAGE_BUSY, + .set = "busy", + }, { +#ifdef CONFIG_PPC_64K_PAGES + .mask = H_PAGE_COMBO, + .val = H_PAGE_COMBO, + .set = "combo", + }, { + .mask = H_PAGE_4K_PFN, + .val = H_PAGE_4K_PFN, + .set = "4K_pfn", + }, { +#endif + .mask = H_PAGE_F_GIX, + .val = H_PAGE_F_GIX, + .set = "f_gix", + .is_val = true, + .shift = H_PAGE_F_GIX_SHIFT, + }, { + .mask = H_PAGE_F_SECOND, + .val = H_PAGE_F_SECOND, + .set = "f_second", + }, { + .mask = _PAGE_SPECIAL, + .val = _PAGE_SPECIAL, + .set = "special", + } +}; + +struct pgtable_level { + const struct flag_info *flag; + size_t num; + u64 mask; +}; + +static struct pgtable_level pg_level[] = { + { + }, { /* pgd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pud */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pmd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pte */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, +}; + +static void dump_flag_info(struct pg_state *st, const struct flag_info + *flag, u64 pte, int num) +{ + unsigned int i; + + for (i = 0; i < num; i++, flag++) { + const char *s = NULL; + u64 val; + + /* flag not defined so don't check it */ + if (flag->mask == 0) + continue; + /* Some 'flags' are actually values */ + if (flag->is_val) { + val = pte & flag->val; + if (flag->shift) + val = val >> flag->shift; + seq_printf(st->seq, " %s:%llx", flag->set, val); + } else { + if ((pte & flag->mask) == flag->val) + s = flag->set; + else + s = flag->clear; + if (s) + seq_printf(st->seq, " %s", s); + } + st->current_flags &= ~flag->mask; + } + if (st->current_flags != 0) + seq_printf(st->seq, " unknown flags:%llx", st->current_flags); +} + +static void dump_addr(struct pg_state *st, unsigned long addr) +{ + static const char units[] = "KMGTPE"; + const char *unit = units; + unsigned long delta; + + seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr-1); + delta = (addr - st->start_address) >> 10; + /* Work out what appropriate unit to use */ + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; + } + seq_printf(st->seq, "%9lu%c", delta, *unit); + +} + +static void note_page(struct pg_state *st, unsigned long addr, + unsigned int level, u64 val) +{ + u64 flag = val & pg_level[level].mask; + /* At first no level is set */ + if (!st->level) { + st->level = level; + st->current_flags = flag; + st->start_address = addr; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + /* + * Dump the section of virtual memory when: + * - the PTE flags from one entry to the next differs. + * - we change levels in the tree. + * - the address is in a different section of memory and is thus + * used for a different purpose, regardless of the flags. 
+ */ + } else if (flag != st->current_flags || level != st->level || + addr >= st->marker[1].start_address) { + + /* Check the PTE flags */ + if (st->current_flags) { + dump_addr(st, addr); + + /* Dump all the flags */ + if (pg_level[st->level].flag) + dump_flag_info(st, pg_level[st->level].flag, + st->current_flags, + pg_level[st->level].num); + + seq_puts(st->seq, "\n"); + } + + /* + * Address indicates we have passed the end of the + * current section of virtual memory + */ + while (addr >= st->marker[1].start_address) { + st->marker++; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + st->start_address = addr; + st->current_flags = flag; + st->level = level; + } +} + +static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) +{ + pte_t *pte = pte_offset_kernel(pmd, 0); + unsigned long addr; + unsigned int i; + + for (i = 0; i < PTRS_PER_PTE; i++, pte++) { + addr = start + i * PAGE_SIZE; + note_page(st, addr, 4, pte_val(*pte)); + + } +} + +static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) +{ + pmd_t *pmd = pmd_offset(pud, 0); + unsigned long addr; + unsigned int i; + + for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { + addr = start + i * PMD_SIZE; + if (!pmd_none(*pmd)) + /* pmd exists */ + walk_pte(st, pmd, addr); + else + note_page(st, addr, 3, pmd_val(*pmd)); + } +} + +static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) +{ + pud_t *pud = pud_offset(pgd, 0); + unsigned long addr; + unsigned int i; + + for (i = 0; i < PTRS_PER_PUD; i++, pud++) { + addr = start + i * PUD_SIZE; + if (!pud_none(*pud)) + /* pud exists */ + walk_pmd(st, pud, addr); + else + note_page(st, addr, 2, pud_val(*pud)); + } +} + +static void walk_pagetables(struct pg_state *st) +{ + pgd_t *pgd = pgd_offset_k(0UL); + unsigned int i; + unsigned long addr; + + /* + * Traverse the linux pagetable structure and dump pages that are in + * the hash pagetable. 
+ */ + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { + addr = KERN_VIRT_START + i * PGDIR_SIZE; + if (!pgd_none(*pgd)) + /* pgd exists */ + walk_pud(st, pgd, addr); + else + note_page(st, addr, 1, pgd_val(*pgd)); + } +} + +static void populate_markers(void) +{ + address_markers[0].start_address = PAGE_OFFSET; + address_markers[1].start_address = VMALLOC_START; + address_markers[2].start_address = VMALLOC_END; + address_markers[3].start_address = ISA_IO_BASE; + address_markers[4].start_address = ISA_IO_END; + address_markers[5].start_address = PHB_IO_BASE; + address_markers[6].start_address = PHB_IO_END; + address_markers[7].start_address = IOREMAP_BASE; + address_markers[8].start_address = IOREMAP_END; +#ifdef CONFIG_PPC_STD_MMU_64 + address_markers[9].start_address = H_VMEMMAP_BASE; +#else + address_markers[9].start_address = VMEMMAP_BASE; +#endif +} + +static int ptdump_show(struct seq_file *m, void *v) +{ + struct pg_state st = { + .seq = m, + .start_address = KERN_VIRT_START, + .marker = address_markers, + }; + /* Traverse kernel page tables */ + walk_pagetables(&st); + note_page(&st, 0, 0, 0); + return 0; +} + + +static int ptdump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ptdump_show, NULL); +} + +static const struct file_operations ptdump_fops = { + .open = ptdump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void build_pgtable_complete_mask(void) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(pg_level); i++) + if (pg_level[i].flag) + for (j = 0; j < pg_level[i].num; j++) + pg_level[i].mask |= pg_level[i].flag[j].mask; +} + +static int ptdump_init(void) +{ + struct dentry *debugfs_file; + + populate_markers(); + build_pgtable_complete_mask(); + debugfs_file = debugfs_create_file("kernel_pagetables", 0400, NULL, + NULL, &ptdump_fops); + return debugfs_file ? 0 : -ENOMEM; +} +device_initcall(ptdump_init); -- cgit From 1515ab932156257afd8a5864506dab80f63ff38b Mon Sep 17 00:00:00 2001 From: Rashmica Gupta Date: Fri, 27 May 2016 15:49:00 +1000 Subject: powerpc/mm: Dump hash table Useful to be able to dump the kernel hash page table to check which pages are hashed along with their sizes and other details. Add a debugfs file to check the hash page table. If radix is enabled (and so there is no hash page table) then this file doesn't exist. To use this the PPC_PTDUMP config option must be selected. Signed-off-by: Rashmica Gupta [mpe: Fix build with SPARSEMEM_VMEMMAP=n & PSERIES=n] Signed-off-by: Michael Ellerman --- arch/powerpc/mm/Makefile | 3 +- arch/powerpc/mm/dump_hashpagetable.c | 551 +++++++++++++++++++++++++++++++++++ 2 files changed, 553 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/mm/dump_hashpagetable.c diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 5b3e4d560315..f4ffe1f68ce9 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -42,4 +42,5 @@ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o -obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o +obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o \ + dump_hashpagetable.o diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c new file mode 100644 index 000000000000..d979709a0239 --- /dev/null +++ b/arch/powerpc/mm/dump_hashpagetable.c @@ -0,0 +1,551 @@ +/* + * Copyright 2016, Rashmica Gupta, IBM Corp. 
+ * + * This traverses the kernel virtual memory and dumps the pages that are in + * the hash pagetable, along with their flags to + * /sys/kernel/debug/kernel_hash_pagetable. + * + * If radix is enabled then there is no hash page table and so no debugfs file + * is generated. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pg_state { + struct seq_file *seq; + const struct addr_marker *marker; + unsigned long start_address; + unsigned int level; + u64 current_flags; +}; + +struct addr_marker { + unsigned long start_address; + const char *name; +}; + +static struct addr_marker address_markers[] = { + { 0, "Start of kernel VM" }, + { 0, "vmalloc() Area" }, + { 0, "vmalloc() End" }, + { 0, "isa I/O start" }, + { 0, "isa I/O end" }, + { 0, "phb I/O start" }, + { 0, "phb I/O end" }, + { 0, "I/O remap start" }, + { 0, "I/O remap end" }, + { 0, "vmemmap start" }, + { -1, NULL }, +}; + +struct flag_info { + u64 mask; + u64 val; + const char *set; + const char *clear; + bool is_val; + int shift; +}; + +static const struct flag_info v_flag_array[] = { + { + .mask = SLB_VSID_B, + .val = SLB_VSID_B_256M, + .set = "ssize: 256M", + .clear = "ssize: 1T ", + }, { + .mask = HPTE_V_SECONDARY, + .val = HPTE_V_SECONDARY, + .set = "secondary", + .clear = "primary ", + }, { + .mask = HPTE_V_VALID, + .val = HPTE_V_VALID, + .set = "valid ", + .clear = "invalid", + }, { + .mask = HPTE_V_BOLTED, + .val = HPTE_V_BOLTED, + .set = "bolted", + .clear = "", + } +}; + +static const struct flag_info r_flag_array[] = { + { + .mask = HPTE_R_PP0 | HPTE_R_PP, + .val = PP_RWXX, + .set = "prot:RW--", + }, { + .mask = HPTE_R_PP0 | HPTE_R_PP, + .val = PP_RWRX, + .set = "prot:RWR-", + }, { + .mask = HPTE_R_PP0 | HPTE_R_PP, + .val = PP_RWRW, + .set = "prot:RWRW", + }, { + .mask = HPTE_R_PP0 | HPTE_R_PP, + .val = PP_RXRX, + .set = "prot:R-R-", + }, { + .mask = HPTE_R_PP0 | HPTE_R_PP, + .val = PP_RXXX, + .set = "prot:R---", + }, { + .mask = HPTE_R_KEY_HI | HPTE_R_KEY_LO, + .val = HPTE_R_KEY_HI | HPTE_R_KEY_LO, + .set = "key", + .clear = "", + .is_val = true, + }, { + .mask = HPTE_R_R, + .val = HPTE_R_R, + .set = "ref", + .clear = " ", + }, { + .mask = HPTE_R_C, + .val = HPTE_R_C, + .set = "changed", + .clear = " ", + }, { + .mask = HPTE_R_N, + .val = HPTE_R_N, + .set = "no execute", + }, { + .mask = HPTE_R_WIMG, + .val = HPTE_R_W, + .set = "writethru", + }, { + .mask = HPTE_R_WIMG, + .val = HPTE_R_I, + .set = "no cache", + }, { + .mask = HPTE_R_WIMG, + .val = HPTE_R_G, + .set = "guarded", + } +}; + +static int calculate_pagesize(struct pg_state *st, int ps, char s[]) +{ + static const char units[] = "BKMGTPE"; + const char *unit = units; + + while (ps > 9 && unit[1]) { + ps -= 10; + unit++; + } + seq_printf(st->seq, " %s_ps: %i%c\t", s, 1<mask == 0) + continue; + /* Some 'flags' are actually values */ + if (flag->is_val) { + val = pte & flag->val; + if (flag->shift) + val = val >> flag->shift; + seq_printf(st->seq, " %s:%llx", flag->set, val); + } else { + if ((pte & flag->mask) == flag->val) + s = flag->set; + else + s = flag->clear; + if (s) + seq_printf(st->seq, " %s", s); + } + } +} + +static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r, + unsigned long rpn, int bps, int aps, unsigned 
long lp) +{ + int aps_index; + + while (ea >= st->marker[1].start_address) { + st->marker++; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + seq_printf(st->seq, "0x%lx:\t", ea); + seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v)); + dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array)); + seq_printf(st->seq, " rpn: %lx\t", rpn); + dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array)); + + calculate_pagesize(st, bps, "base"); + aps_index = calculate_pagesize(st, aps, "actual"); + if (aps_index != 2) + seq_printf(st->seq, "LP enc: %lx", lp); + seq_puts(st->seq, "\n"); +} + + +static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64 + *r) +{ + struct hash_pte *hptep; + unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v; + int i, ssize = mmu_kernel_ssize; + unsigned long shift = mmu_psize_defs[psize].shift; + + /* calculate hash */ + vsid = get_kernel_vsid(ea, ssize); + vpn = hpt_vpn(ea, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + want_v = hpte_encode_avpn(vpn, psize, ssize); + + /* to check in the secondary hash table, we invert the hash */ + if (!primary) + hash = ~hash; + hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; + for (i = 0; i < HPTES_PER_GROUP; i++) { + hptep = htab_address + hpte_group; + hpte_v = be64_to_cpu(hptep->v); + + if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { + /* HPTE matches */ + *v = be64_to_cpu(hptep->v); + *r = be64_to_cpu(hptep->r); + return 0; + } + ++hpte_group; + } + return -1; +} + +#ifdef CONFIG_PPC_PSERIES +static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r) +{ + struct hash_pte ptes[4]; + unsigned long vsid, vpn, hash, hpte_group, want_v; + int i, j, ssize = mmu_kernel_ssize; + long lpar_rc = 0; + unsigned long shift = mmu_psize_defs[psize].shift; + + /* calculate hash */ + vsid = get_kernel_vsid(ea, ssize); + vpn = hpt_vpn(ea, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + want_v = hpte_encode_avpn(vpn, psize, ssize); + + /* to check in the secondary hash table, we invert the hash */ + if (!primary) + hash = ~hash; + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + /* see if we can find an entry in the hpte with this hash */ + for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { + lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); + + if (lpar_rc != H_SUCCESS) + continue; + for (j = 0; j < 4; j++) { + if (HPTE_V_COMPARE(ptes[j].v, want_v) && + (ptes[j].v & HPTE_V_VALID)) { + /* HPTE matches */ + *v = ptes[j].v; + *r = ptes[j].r; + return 0; + } + } + } + return -1; +} +#endif + +static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps, + unsigned long *lp_bits) +{ + struct mmu_psize_def entry; + unsigned long arpn, mask, lp; + int penc = -2, idx = 0, shift; + + /*. + * The LP field has 8 bits. Depending on the actual page size, some of + * these bits are concatenated with the APRN to get the RPN. The rest + * of the bits in the LP field is the LP value and is an encoding for + * the base page size and the actual page size. + * + * - find the mmu entry for our base page size + * - go through all page encodings and use the associated mask to + * find an encoding that matches our encoding in the LP field. 
+ */ + arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT; + lp = arpn & 0xff; + + entry = mmu_psize_defs[bps]; + while (idx < MMU_PAGE_COUNT) { + penc = entry.penc[idx]; + if ((penc != -1) && (mmu_psize_defs[idx].shift)) { + shift = mmu_psize_defs[idx].shift - HPTE_R_RPN_SHIFT; + mask = (0x1 << (shift)) - 1; + if ((lp & mask) == penc) { + *aps = mmu_psize_to_shift(idx); + *lp_bits = lp & mask; + *rpn = arpn >> shift; + return; + } + } + idx++; + } +} + +static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v, + u64 *r) +{ +#ifdef CONFIG_PPC_PSERIES + if (firmware_has_feature(FW_FEATURE_LPAR)) + return pseries_find(ea, psize, primary, v, r); +#endif + return native_find(ea, psize, primary, v, r); +} + +static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize) +{ + unsigned long slot; + u64 v = 0, r = 0; + unsigned long rpn, lp_bits; + int base_psize = 0, actual_psize = 0; + + if (ea <= PAGE_OFFSET) + return -1; + + /* Look in primary table */ + slot = base_hpte_find(ea, psize, true, &v, &r); + + /* Look in secondary table */ + if (slot == -1) + slot = base_hpte_find(ea, psize, true, &v, &r); + + /* No entry found */ + if (slot == -1) + return -1; + + /* + * We found an entry in the hash page table: + * - check that this has the same base page + * - find the actual page size + * - find the RPN + */ + base_psize = mmu_psize_to_shift(psize); + + if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) { + decode_r(psize, r, &rpn, &actual_psize, &lp_bits); + } else { + /* 4K actual page size */ + actual_psize = 12; + rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT; + /* In this case there are no LP bits */ + lp_bits = -1; + } + /* + * We didn't find a matching encoding, so the PTE we found isn't for + * this address. + */ + if (actual_psize == -1) + return -1; + + dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits); + return 0; +} + +static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) +{ + pte_t *pte = pte_offset_kernel(pmd, 0); + unsigned long addr, pteval, psize; + int i, status; + + for (i = 0; i < PTRS_PER_PTE; i++, pte++) { + addr = start + i * PAGE_SIZE; + pteval = pte_val(*pte); + + if (addr < VMALLOC_END) + psize = mmu_vmalloc_psize; + else + psize = mmu_io_psize; +#ifdef CONFIG_PPC_64K_PAGES + /* check for secret 4K mappings */ + if (((pteval & H_PAGE_COMBO) == H_PAGE_COMBO) || + ((pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN)) + psize = mmu_io_psize; +#endif + /* check for hashpte */ + status = hpte_find(st, addr, psize); + + if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE) + && (status != -1)) { + /* found a hpte that is not in the linux page tables */ + seq_printf(st->seq, "page probably bolted before linux" + " pagetables were set: addr:%lx, pteval:%lx\n", + addr, pteval); + } + } +} + +static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) +{ + pmd_t *pmd = pmd_offset(pud, 0); + unsigned long addr; + unsigned int i; + + for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { + addr = start + i * PMD_SIZE; + if (!pmd_none(*pmd)) + /* pmd exists */ + walk_pte(st, pmd, addr); + } +} + +static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) +{ + pud_t *pud = pud_offset(pgd, 0); + unsigned long addr; + unsigned int i; + + for (i = 0; i < PTRS_PER_PUD; i++, pud++) { + addr = start + i * PUD_SIZE; + if (!pud_none(*pud)) + /* pud exists */ + walk_pmd(st, pud, addr); + } +} + +static void walk_pagetables(struct pg_state *st) +{ + pgd_t *pgd = pgd_offset_k(0UL); + unsigned int i; + unsigned long addr; + + /* + * 
Traverse the linux pagetable structure and dump pages that are in + * the hash pagetable. + */ + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { + addr = KERN_VIRT_START + i * PGDIR_SIZE; + if (!pgd_none(*pgd)) + /* pgd exists */ + walk_pud(st, pgd, addr); + } +} + + +static void walk_linearmapping(struct pg_state *st) +{ + unsigned long addr; + + /* + * Traverse the linear mapping section of virtual memory and dump pages + * that are in the hash pagetable. + */ + unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift; + + for (addr = PAGE_OFFSET; addr < PAGE_OFFSET + + memblock_phys_mem_size(); addr += psize) + hpte_find(st, addr, mmu_linear_psize); +} + +static void walk_vmemmap(struct pg_state *st) +{ +#ifdef CONFIG_SPARSEMEM_VMEMMAP + struct vmemmap_backing *ptr = vmemmap_list; + + /* + * Traverse the vmemmaped memory and dump pages that are in the hash + * pagetable. + */ + while (ptr->list) { + hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize); + ptr = ptr->list; + } + seq_puts(st->seq, "---[ vmemmap end ]---\n"); +#endif +} + +static void populate_markers(void) +{ + address_markers[0].start_address = PAGE_OFFSET; + address_markers[1].start_address = VMALLOC_START; + address_markers[2].start_address = VMALLOC_END; + address_markers[3].start_address = ISA_IO_BASE; + address_markers[4].start_address = ISA_IO_END; + address_markers[5].start_address = PHB_IO_BASE; + address_markers[6].start_address = PHB_IO_END; + address_markers[7].start_address = IOREMAP_BASE; + address_markers[8].start_address = IOREMAP_END; +#ifdef CONFIG_PPC_STD_MMU_64 + address_markers[9].start_address = H_VMEMMAP_BASE; +#else + address_markers[9].start_address = VMEMMAP_BASE; +#endif +} + +static int ptdump_show(struct seq_file *m, void *v) +{ + struct pg_state st = { + .seq = m, + .start_address = PAGE_OFFSET, + .marker = address_markers, + }; + /* + * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and + * dump pages that are in the hash pagetable. + */ + walk_linearmapping(&st); + walk_pagetables(&st); + walk_vmemmap(&st); + return 0; +} + +static int ptdump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ptdump_show, NULL); +} + +static const struct file_operations ptdump_fops = { + .open = ptdump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ptdump_init(void) +{ + struct dentry *debugfs_file; + + if (!radix_enabled()) { + populate_markers(); + debugfs_file = debugfs_create_file("kernel_hash_pagetable", + 0400, NULL, NULL, &ptdump_fops); + return debugfs_file ? 0 : -ENOMEM; + } + return 0; +} +device_initcall(ptdump_init); -- cgit From efe71a67b5aa89e0348ac862ca92a57d98d3fd8d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:50 +0800 Subject: selftests/powerpc: Add more SPR numbers, TM & VMX instructions to 'reg.h'/'instructions.h' This patch adds SPR number for TAR, PPR, DSCR special purpose registers. It also adds TM, VSX, VMX related instructions which will then be used by patches later in the series. Now that the new DSCR register definitions (SPRN_DSCR_PRIV and SPRN_DSCR) are defined outside this directory, use them instead. 
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/dscr/dscr.h | 10 ++++----- tools/testing/selftests/powerpc/reg.h | 35 ++++++++++++++++++++++++++--- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h index a36af1b2c2bb..18ea223bd398 100644 --- a/tools/testing/selftests/powerpc/dscr/dscr.h +++ b/tools/testing/selftests/powerpc/dscr/dscr.h @@ -28,8 +28,6 @@ #include "utils.h" -#define SPRN_DSCR 0x11 /* Privilege state SPR */ -#define SPRN_DSCR_USR 0x03 /* Problem state SPR */ #define THREADS 100 /* Max threads */ #define COUNT 100 /* Max iterations */ #define DSCR_MAX 16 /* Max DSCR value */ @@ -48,14 +46,14 @@ inline unsigned long get_dscr(void) { unsigned long ret; - asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR)); + asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR_PRIV)); return ret; } inline void set_dscr(unsigned long val) { - asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); + asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_PRIV)); } /* Problem state DSCR access */ @@ -63,14 +61,14 @@ inline unsigned long get_dscr_usr(void) { unsigned long ret; - asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR_USR)); + asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR)); return ret; } inline void set_dscr_usr(unsigned long val) { - asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_USR)); + asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); } /* Default DSCR access */ diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h index 65bfdeeebdee..bb843b2d9bac 100644 --- a/tools/testing/selftests/powerpc/reg.h +++ b/tools/testing/selftests/powerpc/reg.h @@ -46,10 +46,39 @@ #define SPRN_SDAR 781 #define SPRN_SIER 768 -#define SPRN_TEXASR 0x82 +#define SPRN_TEXASR 0x82 /* Transaction Exception and Status Register */ #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ -#define TEXASR_FS 0x08000000 -#define SPRN_TAR 0x32f +#define SPRN_TAR 0x32f /* Target Address Register */ + +#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */ +#define SPRN_DSCR 0x03 /* Data Stream Control Register */ +#define SPRN_PPR 896 /* Program Priority Register */ + +/* TEXASR register bits */ +#define TEXASR_FC 0xFE00000000000000 +#define TEXASR_FP 0x0100000000000000 +#define TEXASR_DA 0x0080000000000000 +#define TEXASR_NO 0x0040000000000000 +#define TEXASR_FO 0x0020000000000000 +#define TEXASR_SIC 0x0010000000000000 +#define TEXASR_NTC 0x0008000000000000 +#define TEXASR_TC 0x0004000000000000 +#define TEXASR_TIC 0x0002000000000000 +#define TEXASR_IC 0x0001000000000000 +#define TEXASR_IFC 0x0000800000000000 +#define TEXASR_ABT 0x0000000100000000 +#define TEXASR_SPD 0x0000000080000000 +#define TEXASR_HV 0x0000000020000000 +#define TEXASR_PR 0x0000000010000000 +#define TEXASR_FS 0x0000000008000000 +#define TEXASR_TE 0x0000000004000000 +#define TEXASR_ROT 0x0000000002000000 + +/* Vector Instructions */ +#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ + ((rb) << 11) | (((xs) >> 5))) +#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) +#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) #endif /* _SELFTESTS_POWERPC_REG_H */ -- cgit From 15ec3997aa3bffc461f7b47ca9365d4b0323c671 Mon Sep 17 00:00:00 2001 From: Simon Guo Date: Fri, 
30 Sep 2016 10:32:51 +0800 Subject: selftests/powerpc: Move shared headers into new include dir There are some functions, especially register related, which can be shared across multiple selftests/powerpc test directories. This patch creates a new include directory to store those shared files, so that the file layout becomes more neat. Signed-off-by: Simon Guo [mpe: Reworked to move the headers only] Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/Makefile | 2 +- tools/testing/selftests/powerpc/basic_asm.h | 70 ---------------- .../selftests/powerpc/benchmarks/context_switch.c | 2 +- .../testing/selftests/powerpc/copyloops/validate.c | 2 +- tools/testing/selftests/powerpc/fpu_asm.h | 80 ------------------ tools/testing/selftests/powerpc/gpr_asm.h | 96 ---------------------- .../testing/selftests/powerpc/include/basic_asm.h | 73 ++++++++++++++++ tools/testing/selftests/powerpc/include/fpu_asm.h | 80 ++++++++++++++++++ tools/testing/selftests/powerpc/include/gpr_asm.h | 96 ++++++++++++++++++++++ .../selftests/powerpc/include/instructions.h | 68 +++++++++++++++ tools/testing/selftests/powerpc/include/reg.h | 84 +++++++++++++++++++ tools/testing/selftests/powerpc/include/subunit.h | 52 ++++++++++++ tools/testing/selftests/powerpc/include/utils.h | 77 +++++++++++++++++ tools/testing/selftests/powerpc/include/vmx_asm.h | 96 ++++++++++++++++++++++ tools/testing/selftests/powerpc/include/vsx_asm.h | 71 ++++++++++++++++ tools/testing/selftests/powerpc/instructions.h | 68 --------------- tools/testing/selftests/powerpc/math/fpu_asm.S | 4 +- tools/testing/selftests/powerpc/math/vmx_asm.S | 4 +- tools/testing/selftests/powerpc/math/vsx_asm.S | 4 +- tools/testing/selftests/powerpc/reg.h | 84 ------------------- tools/testing/selftests/powerpc/signal/signal.S | 2 +- .../testing/selftests/powerpc/stringloops/memcmp.c | 2 +- tools/testing/selftests/powerpc/subunit.h | 52 ------------ tools/testing/selftests/powerpc/tm/tm-signal.S | 10 +-- tools/testing/selftests/powerpc/tm/tm.h | 2 +- tools/testing/selftests/powerpc/utils.h | 77 ----------------- tools/testing/selftests/powerpc/vmx_asm.h | 96 ---------------------- tools/testing/selftests/powerpc/vsx_asm.h | 71 ---------------- 28 files changed, 714 insertions(+), 711 deletions(-) delete mode 100644 tools/testing/selftests/powerpc/basic_asm.h delete mode 100644 tools/testing/selftests/powerpc/fpu_asm.h delete mode 100644 tools/testing/selftests/powerpc/gpr_asm.h create mode 100644 tools/testing/selftests/powerpc/include/basic_asm.h create mode 100644 tools/testing/selftests/powerpc/include/fpu_asm.h create mode 100644 tools/testing/selftests/powerpc/include/gpr_asm.h create mode 100644 tools/testing/selftests/powerpc/include/instructions.h create mode 100644 tools/testing/selftests/powerpc/include/reg.h create mode 100644 tools/testing/selftests/powerpc/include/subunit.h create mode 100644 tools/testing/selftests/powerpc/include/utils.h create mode 100644 tools/testing/selftests/powerpc/include/vmx_asm.h create mode 100644 tools/testing/selftests/powerpc/include/vsx_asm.h delete mode 100644 tools/testing/selftests/powerpc/instructions.h delete mode 100644 tools/testing/selftests/powerpc/reg.h delete mode 100644 tools/testing/selftests/powerpc/subunit.h delete mode 100644 tools/testing/selftests/powerpc/utils.h delete mode 100644 tools/testing/selftests/powerpc/vmx_asm.h delete mode 100644 tools/testing/selftests/powerpc/vsx_asm.h diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 
db54a33f850f..561afa1921b9 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc) GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") -CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) +CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR)/include $(CFLAGS) export CFLAGS diff --git a/tools/testing/selftests/powerpc/basic_asm.h b/tools/testing/selftests/powerpc/basic_asm.h deleted file mode 100644 index 3349a0704d1a..000000000000 --- a/tools/testing/selftests/powerpc/basic_asm.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef _SELFTESTS_POWERPC_BASIC_ASM_H -#define _SELFTESTS_POWERPC_BASIC_ASM_H - -#include -#include - -#define LOAD_REG_IMMEDIATE(reg,expr) \ - lis reg,(expr)@highest; \ - ori reg,reg,(expr)@higher; \ - rldicr reg,reg,32,31; \ - oris reg,reg,(expr)@high; \ - ori reg,reg,(expr)@l; - -/* - * Note: These macros assume that variables being stored on the stack are - * doublewords, while this is usually the case it may not always be the - * case for each use case. - */ -#if defined(_CALL_ELF) && _CALL_ELF == 2 -#define STACK_FRAME_MIN_SIZE 32 -#define STACK_FRAME_TOC_POS 24 -#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8)) -#define __STACK_FRAME_LOCAL(_num_params,_var_num) ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8)) -#else -#define STACK_FRAME_MIN_SIZE 112 -#define STACK_FRAME_TOC_POS 40 -#define __STACK_FRAME_PARAM(i) (48 + ((i)*8)) - -/* - * Caveat: if a function passed more than 8 doublewords, the caller will have - * made more space... which would render the 112 incorrect. - */ -#define __STACK_FRAME_LOCAL(_num_params,_var_num) (112 + ((_var_num)*8)) -#endif - -/* Parameter x saved to the stack */ -#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var) - -/* Local variable x saved to the stack after x parameters */ -#define STACK_FRAME_LOCAL(num_params,var) __STACK_FRAME_LOCAL(num_params,var) -#define STACK_FRAME_LR_POS 16 -#define STACK_FRAME_CR_POS 8 - -/* - * It is very important to note here that _extra is the extra amount of - * stack space needed. This space can be accessed using STACK_FRAME_PARAM() - * or STACK_FRAME_LOCAL() macros. - * - * r1 and r2 are not defined in ppc-asm.h (instead they are defined as sp - * and toc). Kernel programmers tend to prefer rX even for r1 and r2, hence - * %1 and %r2. r0 is defined in ppc-asm.h and therefore %r0 gets - * preprocessed incorrectly, hence r0. 
- */ -#define PUSH_BASIC_STACK(_extra) \ - mflr r0; \ - std r0,STACK_FRAME_LR_POS(%r1); \ - stdu %r1,-(_extra + STACK_FRAME_MIN_SIZE)(%r1); \ - mfcr r0; \ - stw r0,STACK_FRAME_CR_POS(%r1); \ - std %r2,STACK_FRAME_TOC_POS(%r1); - -#define POP_BASIC_STACK(_extra) \ - ld %r2,STACK_FRAME_TOC_POS(%r1); \ - lwz r0,STACK_FRAME_CR_POS(%r1); \ - mtcr r0; \ - addi %r1,%r1,(_extra + STACK_FRAME_MIN_SIZE); \ - ld r0,STACK_FRAME_LR_POS(%r1); \ - mtlr r0; - -#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */ diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index a36883ad48a4..778f5fbfd784 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -28,7 +28,7 @@ #ifdef __powerpc__ #include #endif -#include "../utils.h" +#include "utils.h" static unsigned int timeout = 30; diff --git a/tools/testing/selftests/powerpc/copyloops/validate.c b/tools/testing/selftests/powerpc/copyloops/validate.c index 1750ff57ee58..7fb436f82d16 100644 --- a/tools/testing/selftests/powerpc/copyloops/validate.c +++ b/tools/testing/selftests/powerpc/copyloops/validate.c @@ -3,7 +3,7 @@ #include #include -#include "../utils.h" +#include "utils.h" #define MAX_LEN 8192 #define MAX_OFFSET 16 diff --git a/tools/testing/selftests/powerpc/fpu_asm.h b/tools/testing/selftests/powerpc/fpu_asm.h deleted file mode 100644 index 6a387d255e27..000000000000 --- a/tools/testing/selftests/powerpc/fpu_asm.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016, Cyril Bur, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _SELFTESTS_POWERPC_FPU_ASM_H -#define _SELFTESTS_POWERPC_FPU_ASM_H -#include "basic_asm.h" - -#define PUSH_FPU(stack_size) \ - stfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \ - stfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \ - stfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \ - stfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \ - stfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \ - stfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \ - stfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \ - stfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \ - stfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \ - stfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \ - stfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \ - stfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \ - stfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \ - stfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \ - stfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \ - stfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \ - stfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \ - stfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1); - -#define POP_FPU(stack_size) \ - lfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \ - lfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \ - lfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \ - lfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \ - lfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \ - lfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \ - lfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \ - lfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \ - lfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \ - lfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \ - lfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \ - lfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \ - lfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \ - lfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \ - lfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \ - lfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \ - lfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \ - lfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1); - -/* - * Careful calling this, it will 'clobber' fpu (by design) - * Don't call this from C - */ -FUNC_START(load_fpu) - lfd f14,0(r3) - lfd f15,8(r3) - lfd f16,16(r3) - lfd f17,24(r3) - lfd f18,32(r3) - lfd f19,40(r3) - lfd f20,48(r3) - lfd f21,56(r3) - lfd f22,64(r3) - lfd f23,72(r3) - lfd f24,80(r3) - lfd f25,88(r3) - lfd f26,96(r3) - lfd f27,104(r3) - lfd f28,112(r3) - lfd f29,120(r3) - lfd f30,128(r3) - lfd f31,136(r3) - blr -FUNC_END(load_fpu) - -#endif /* _SELFTESTS_POWERPC_FPU_ASM_H */ diff --git a/tools/testing/selftests/powerpc/gpr_asm.h b/tools/testing/selftests/powerpc/gpr_asm.h deleted file mode 100644 index f6f38852d3a0..000000000000 --- a/tools/testing/selftests/powerpc/gpr_asm.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2016, Cyril Bur, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _SELFTESTS_POWERPC_GPR_ASM_H -#define _SELFTESTS_POWERPC_GPR_ASM_H - -#include "basic_asm.h" - -#define __PUSH_NVREGS(top_pos); \ - std r31,(top_pos)(%r1); \ - std r30,(top_pos - 8)(%r1); \ - std r29,(top_pos - 16)(%r1); \ - std r28,(top_pos - 24)(%r1); \ - std r27,(top_pos - 32)(%r1); \ - std r26,(top_pos - 40)(%r1); \ - std r25,(top_pos - 48)(%r1); \ - std r24,(top_pos - 56)(%r1); \ - std r23,(top_pos - 64)(%r1); \ - std r22,(top_pos - 72)(%r1); \ - std r21,(top_pos - 80)(%r1); \ - std r20,(top_pos - 88)(%r1); \ - std r19,(top_pos - 96)(%r1); \ - std r18,(top_pos - 104)(%r1); \ - std r17,(top_pos - 112)(%r1); \ - std r16,(top_pos - 120)(%r1); \ - std r15,(top_pos - 128)(%r1); \ - std r14,(top_pos - 136)(%r1) - -#define __POP_NVREGS(top_pos); \ - ld r31,(top_pos)(%r1); \ - ld r30,(top_pos - 8)(%r1); \ - ld r29,(top_pos - 16)(%r1); \ - ld r28,(top_pos - 24)(%r1); \ - ld r27,(top_pos - 32)(%r1); \ - ld r26,(top_pos - 40)(%r1); \ - ld r25,(top_pos - 48)(%r1); \ - ld r24,(top_pos - 56)(%r1); \ - ld r23,(top_pos - 64)(%r1); \ - ld r22,(top_pos - 72)(%r1); \ - ld r21,(top_pos - 80)(%r1); \ - ld r20,(top_pos - 88)(%r1); \ - ld r19,(top_pos - 96)(%r1); \ - ld r18,(top_pos - 104)(%r1); \ - ld r17,(top_pos - 112)(%r1); \ - ld r16,(top_pos - 120)(%r1); \ - ld r15,(top_pos - 128)(%r1); \ - ld r14,(top_pos - 136)(%r1) - -#define PUSH_NVREGS(stack_size) \ - __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE) - -/* 18 NV FPU REGS */ -#define PUSH_NVREGS_BELOW_FPU(stack_size) \ - __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8)) - -#define POP_NVREGS(stack_size) \ - __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE) - -/* 18 NV FPU REGS */ -#define POP_NVREGS_BELOW_FPU(stack_size) \ - __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8)) - -/* - * Careful calling this, it will 'clobber' NVGPRs (by design) - * Don't call this from C - */ -FUNC_START(load_gpr) - ld r14,0(r3) - ld r15,8(r3) - ld r16,16(r3) - ld r17,24(r3) - ld r18,32(r3) - ld r19,40(r3) - ld r20,48(r3) - ld r21,56(r3) - ld r22,64(r3) - ld r23,72(r3) - ld r24,80(r3) - ld r25,88(r3) - ld r26,96(r3) - ld r27,104(r3) - ld r28,112(r3) - ld r29,120(r3) - ld r30,128(r3) - ld r31,136(r3) - blr -FUNC_END(load_gpr) - - -#endif /* _SELFTESTS_POWERPC_GPR_ASM_H */ diff --git a/tools/testing/selftests/powerpc/include/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h new file mode 100644 index 000000000000..12eaddf72e66 --- /dev/null +++ b/tools/testing/selftests/powerpc/include/basic_asm.h @@ -0,0 +1,73 @@ +#ifndef _SELFTESTS_POWERPC_BASIC_ASM_H +#define _SELFTESTS_POWERPC_BASIC_ASM_H + +#include +#include + +#define LOAD_REG_IMMEDIATE(reg, expr) \ + lis reg, (expr)@highest; \ + ori reg, reg, (expr)@higher; \ + rldicr reg, reg, 32, 31; \ + oris reg, reg, (expr)@high; \ + ori reg, reg, (expr)@l; + +/* + * Note: These macros assume that variables being stored on the stack are + * doublewords, while this is usually the case it may not always be the + * case for each use case. + */ +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define STACK_FRAME_MIN_SIZE 32 +#define STACK_FRAME_TOC_POS 24 +#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8)) +#define __STACK_FRAME_LOCAL(_num_params, _var_num) \ + ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8)) +#else +#define STACK_FRAME_MIN_SIZE 112 +#define STACK_FRAME_TOC_POS 40 +#define __STACK_FRAME_PARAM(i) (48 + ((i)*8)) + +/* + * Caveat: if a function passed more than 8 doublewords, the caller will have + * made more space... which would render the 112 incorrect. 
+ */ +#define __STACK_FRAME_LOCAL(_num_params, _var_num) \ + (112 + ((_var_num)*8)) +#endif + +/* Parameter x saved to the stack */ +#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var) + +/* Local variable x saved to the stack after x parameters */ +#define STACK_FRAME_LOCAL(num_params, var) \ + __STACK_FRAME_LOCAL(num_params, var) +#define STACK_FRAME_LR_POS 16 +#define STACK_FRAME_CR_POS 8 + +/* + * It is very important to note here that _extra is the extra amount of + * stack space needed. This space can be accessed using STACK_FRAME_PARAM() + * or STACK_FRAME_LOCAL() macros. + * + * r1 and r2 are not defined in ppc-asm.h (instead they are defined as sp + * and toc). Kernel programmers tend to prefer rX even for r1 and r2, hence + * %1 and %r2. r0 is defined in ppc-asm.h and therefore %r0 gets + * preprocessed incorrectly, hence r0. + */ +#define PUSH_BASIC_STACK(_extra) \ + mflr r0; \ + std r0, STACK_FRAME_LR_POS(%r1); \ + stdu %r1, -(_extra + STACK_FRAME_MIN_SIZE)(%r1); \ + mfcr r0; \ + stw r0, STACK_FRAME_CR_POS(%r1); \ + std %r2, STACK_FRAME_TOC_POS(%r1); + +#define POP_BASIC_STACK(_extra) \ + ld %r2, STACK_FRAME_TOC_POS(%r1); \ + lwz r0, STACK_FRAME_CR_POS(%r1); \ + mtcr r0; \ + addi %r1, %r1, (_extra + STACK_FRAME_MIN_SIZE); \ + ld r0, STACK_FRAME_LR_POS(%r1); \ + mtlr r0; + +#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */ diff --git a/tools/testing/selftests/powerpc/include/fpu_asm.h b/tools/testing/selftests/powerpc/include/fpu_asm.h new file mode 100644 index 000000000000..6a387d255e27 --- /dev/null +++ b/tools/testing/selftests/powerpc/include/fpu_asm.h @@ -0,0 +1,80 @@ +/* + * Copyright 2016, Cyril Bur, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _SELFTESTS_POWERPC_FPU_ASM_H +#define _SELFTESTS_POWERPC_FPU_ASM_H +#include "basic_asm.h" + +#define PUSH_FPU(stack_size) \ + stfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \ + stfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \ + stfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \ + stfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \ + stfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \ + stfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \ + stfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \ + stfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \ + stfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \ + stfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \ + stfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \ + stfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \ + stfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \ + stfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \ + stfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \ + stfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \ + stfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \ + stfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1); + +#define POP_FPU(stack_size) \ + lfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \ + lfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \ + lfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \ + lfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \ + lfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \ + lfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \ + lfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \ + lfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \ + lfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \ + lfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \ + lfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \ + lfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \ + lfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \ + lfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \ + lfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \ + lfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \ + lfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \ + lfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1); + +/* + * Careful calling this, it will 'clobber' fpu (by design) + * Don't call this from C + */ +FUNC_START(load_fpu) + lfd f14,0(r3) + lfd f15,8(r3) + lfd f16,16(r3) + lfd f17,24(r3) + lfd f18,32(r3) + lfd f19,40(r3) + lfd f20,48(r3) + lfd f21,56(r3) + lfd f22,64(r3) + lfd f23,72(r3) + lfd f24,80(r3) + lfd f25,88(r3) + lfd f26,96(r3) + lfd f27,104(r3) + lfd f28,112(r3) + lfd f29,120(r3) + lfd f30,128(r3) + lfd f31,136(r3) + blr +FUNC_END(load_fpu) + +#endif /* _SELFTESTS_POWERPC_FPU_ASM_H */ diff --git a/tools/testing/selftests/powerpc/include/gpr_asm.h b/tools/testing/selftests/powerpc/include/gpr_asm.h new file mode 100644 index 000000000000..f6f38852d3a0 --- /dev/null +++ b/tools/testing/selftests/powerpc/include/gpr_asm.h @@ -0,0 +1,96 @@ +/* + * Copyright 2016, Cyril Bur, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _SELFTESTS_POWERPC_GPR_ASM_H +#define _SELFTESTS_POWERPC_GPR_ASM_H + +#include "basic_asm.h" + +#define __PUSH_NVREGS(top_pos); \ + std r31,(top_pos)(%r1); \ + std r30,(top_pos - 8)(%r1); \ + std r29,(top_pos - 16)(%r1); \ + std r28,(top_pos - 24)(%r1); \ + std r27,(top_pos - 32)(%r1); \ + std r26,(top_pos - 40)(%r1); \ + std r25,(top_pos - 48)(%r1); \ + std r24,(top_pos - 56)(%r1); \ + std r23,(top_pos - 64)(%r1); \ + std r22,(top_pos - 72)(%r1); \ + std r21,(top_pos - 80)(%r1); \ + std r20,(top_pos - 88)(%r1); \ + std r19,(top_pos - 96)(%r1); \ + std r18,(top_pos - 104)(%r1); \ + std r17,(top_pos - 112)(%r1); \ + std r16,(top_pos - 120)(%r1); \ + std r15,(top_pos - 128)(%r1); \ + std r14,(top_pos - 136)(%r1) + +#define __POP_NVREGS(top_pos); \ + ld r31,(top_pos)(%r1); \ + ld r30,(top_pos - 8)(%r1); \ + ld r29,(top_pos - 16)(%r1); \ + ld r28,(top_pos - 24)(%r1); \ + ld r27,(top_pos - 32)(%r1); \ + ld r26,(top_pos - 40)(%r1); \ + ld r25,(top_pos - 48)(%r1); \ + ld r24,(top_pos - 56)(%r1); \ + ld r23,(top_pos - 64)(%r1); \ + ld r22,(top_pos - 72)(%r1); \ + ld r21,(top_pos - 80)(%r1); \ + ld r20,(top_pos - 88)(%r1); \ + ld r19,(top_pos - 96)(%r1); \ + ld r18,(top_pos - 104)(%r1); \ + ld r17,(top_pos - 112)(%r1); \ + ld r16,(top_pos - 120)(%r1); \ + ld r15,(top_pos - 128)(%r1); \ + ld r14,(top_pos - 136)(%r1) + +#define PUSH_NVREGS(stack_size) \ + __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE) + +/* 18 NV FPU REGS */ +#define PUSH_NVREGS_BELOW_FPU(stack_size) \ + __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8)) + +#define POP_NVREGS(stack_size) \ + __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE) + +/* 18 NV FPU REGS */ +#define POP_NVREGS_BELOW_FPU(stack_size) \ + __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8)) + +/* + * Careful calling this, it will 'clobber' NVGPRs (by design) + * Don't call this from C + */ +FUNC_START(load_gpr) + ld r14,0(r3) + ld r15,8(r3) + ld r16,16(r3) + ld r17,24(r3) + ld r18,32(r3) + ld r19,40(r3) + ld r20,48(r3) + ld r21,56(r3) + ld r22,64(r3) + ld r23,72(r3) + ld r24,80(r3) + ld r25,88(r3) + ld r26,96(r3) + ld r27,104(r3) + ld r28,112(r3) + ld r29,120(r3) + ld r30,128(r3) + ld r31,136(r3) + blr +FUNC_END(load_gpr) + + +#endif /* _SELFTESTS_POWERPC_GPR_ASM_H */ diff --git a/tools/testing/selftests/powerpc/include/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h new file mode 100644 index 000000000000..0fb0bd3b28c9 --- /dev/null +++ b/tools/testing/selftests/powerpc/include/instructions.h @@ -0,0 +1,68 @@ +#ifndef _SELFTESTS_POWERPC_INSTRUCTIONS_H +#define _SELFTESTS_POWERPC_INSTRUCTIONS_H + +#include +#include + +/* This defines the "copy" instruction from Power ISA 3.0 Book II, section 4.4. */ +#define __COPY(RA, RB, L) \ + (0x7c00060c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10)) +#define COPY(RA, RB, L) \ + .long __COPY((RA), (RB), (L)) + +static inline void copy(void *i) +{ + asm volatile(str(COPY(0, %0, 0))";" + : + : "b" (i) + : "memory" + ); +} + +static inline void copy_first(void *i) +{ + asm volatile(str(COPY(0, %0, 1))";" + : + : "b" (i) + : "memory" + ); +} + +/* This defines the "paste" instruction from Power ISA 3.0 Book II, section 4.4. 
*/ +#define __PASTE(RA, RB, L, RC) \ + (0x7c00070c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10) | (RC) << (31-31)) +#define PASTE(RA, RB, L, RC) \ + .long __PASTE((RA), (RB), (L), (RC)) + +static inline int paste(void *i) +{ + int cr; + + asm volatile(str(PASTE(0, %1, 0, 0))";" + "mfcr %0;" + : "=r" (cr) + : "b" (i) + : "memory" + ); + return cr; +} + +static inline int paste_last(void *i) +{ + int cr; + + asm volatile(str(PASTE(0, %1, 1, 1))";" + "mfcr %0;" + : "=r" (cr) + : "b" (i) + : "memory" + ); + return cr; +} + +#define PPC_INST_COPY __COPY(0, 0, 0) +#define PPC_INST_COPY_FIRST __COPY(0, 0, 1) +#define PPC_INST_PASTE __PASTE(0, 0, 0, 0) +#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1) + +#endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */ diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h new file mode 100644 index 000000000000..bb843b2d9bac --- /dev/null +++ b/tools/testing/selftests/powerpc/include/reg.h @@ -0,0 +1,84 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_REG_H +#define _SELFTESTS_POWERPC_REG_H + +#define __stringify_1(x) #x +#define __stringify(x) __stringify_1(x) + +#define mfspr(rn) ({unsigned long rval; \ + asm volatile("mfspr %0," _str(rn) \ + : "=r" (rval)); rval; }) +#define mtspr(rn, v) asm volatile("mtspr " _str(rn) ",%0" : \ + : "r" ((unsigned long)(v)) \ + : "memory") + +#define mb() asm volatile("sync" : : : "memory"); + +#define SPRN_MMCR2 769 +#define SPRN_MMCRA 770 +#define SPRN_MMCR0 779 +#define MMCR0_PMAO 0x00000080 +#define MMCR0_PMAE 0x04000000 +#define MMCR0_FC 0x80000000 +#define SPRN_EBBHR 804 +#define SPRN_EBBRR 805 +#define SPRN_BESCR 806 /* Branch event status & control register */ +#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */ +#define SPRN_BESCRSU 801 /* Branch event status & control set upper */ +#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */ +#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */ + +#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */ +#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */ + +#define SPRN_PMC1 771 +#define SPRN_PMC2 772 +#define SPRN_PMC3 773 +#define SPRN_PMC4 774 +#define SPRN_PMC5 775 +#define SPRN_PMC6 776 + +#define SPRN_SIAR 780 +#define SPRN_SDAR 781 +#define SPRN_SIER 768 + +#define SPRN_TEXASR 0x82 /* Transaction Exception and Status Register */ +#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ +#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ +#define SPRN_TAR 0x32f /* Target Address Register */ + +#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */ +#define SPRN_DSCR 0x03 /* Data Stream Control Register */ +#define SPRN_PPR 896 /* Program Priority Register */ + +/* TEXASR register bits */ +#define TEXASR_FC 0xFE00000000000000 +#define TEXASR_FP 0x0100000000000000 +#define TEXASR_DA 0x0080000000000000 +#define TEXASR_NO 0x0040000000000000 +#define TEXASR_FO 0x0020000000000000 +#define TEXASR_SIC 0x0010000000000000 +#define TEXASR_NTC 0x0008000000000000 +#define TEXASR_TC 0x0004000000000000 +#define TEXASR_TIC 0x0002000000000000 +#define TEXASR_IC 0x0001000000000000 +#define TEXASR_IFC 0x0000800000000000 +#define TEXASR_ABT 0x0000000100000000 +#define TEXASR_SPD 0x0000000080000000 +#define TEXASR_HV 0x0000000020000000 +#define TEXASR_PR 0x0000000010000000 +#define TEXASR_FS 0x0000000008000000 +#define TEXASR_TE 0x0000000004000000 
+#define TEXASR_ROT 0x0000000002000000 + +/* Vector Instructions */ +#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ + ((rb) << 11) | (((xs) >> 5))) +#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) +#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) + +#endif /* _SELFTESTS_POWERPC_REG_H */ diff --git a/tools/testing/selftests/powerpc/include/subunit.h b/tools/testing/selftests/powerpc/include/subunit.h new file mode 100644 index 000000000000..9c6c4e901ab6 --- /dev/null +++ b/tools/testing/selftests/powerpc/include/subunit.h @@ -0,0 +1,52 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_SUBUNIT_H +#define _SELFTESTS_POWERPC_SUBUNIT_H + +static inline void test_start(char *name) +{ + printf("test: %s\n", name); +} + +static inline void test_failure_detail(char *name, char *detail) +{ + printf("failure: %s [%s]\n", name, detail); +} + +static inline void test_failure(char *name) +{ + printf("failure: %s\n", name); +} + +static inline void test_error(char *name) +{ + printf("error: %s\n", name); +} + +static inline void test_skip(char *name) +{ + printf("skip: %s\n", name); +} + +static inline void test_success(char *name) +{ + printf("success: %s\n", name); +} + +static inline void test_finish(char *name, int status) +{ + if (status) + test_failure(name); + else + test_success(name); +} + +static inline void test_set_git_version(char *value) +{ + printf("tags: git_version:%s\n", value); +} + +#endif /* _SELFTESTS_POWERPC_SUBUNIT_H */ diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h new file mode 100644 index 000000000000..53405e8a52ab --- /dev/null +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -0,0 +1,77 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_UTILS_H +#define _SELFTESTS_POWERPC_UTILS_H + +#define __cacheline_aligned __attribute__((aligned(128))) + +#include +#include +#include +#include "reg.h" + +/* Avoid headaches with PRI?64 - just use %ll? 
always */ +typedef unsigned long long u64; +typedef signed long long s64; + +/* Just for familiarity */ +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; + +void test_harness_set_timeout(uint64_t time); +int test_harness(int (test_function)(void), char *name); +extern void *get_auxv_entry(int type); +int pick_online_cpu(void); + +static inline bool have_hwcap(unsigned long ftr) +{ + return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr; +} + +#ifdef AT_HWCAP2 +static inline bool have_hwcap2(unsigned long ftr2) +{ + return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2; +} +#else +static inline bool have_hwcap2(unsigned long ftr2) +{ + return false; +} +#endif + +/* Yes, this is evil */ +#define FAIL_IF(x) \ +do { \ + if ((x)) { \ + fprintf(stderr, \ + "[FAIL] Test FAILED on line %d\n", __LINE__); \ + return 1; \ + } \ +} while (0) + +/* The test harness uses this, yes it's gross */ +#define MAGIC_SKIP_RETURN_VALUE 99 + +#define SKIP_IF(x) \ +do { \ + if ((x)) { \ + fprintf(stderr, \ + "[SKIP] Test skipped on line %d\n", __LINE__); \ + return MAGIC_SKIP_RETURN_VALUE; \ + } \ +} while (0) + +#define _str(s) #s +#define str(s) _str(s) + +/* POWER9 feature */ +#ifndef PPC_FEATURE2_ARCH_3_00 +#define PPC_FEATURE2_ARCH_3_00 0x00800000 +#endif + +#endif /* _SELFTESTS_POWERPC_UTILS_H */ diff --git a/tools/testing/selftests/powerpc/include/vmx_asm.h b/tools/testing/selftests/powerpc/include/vmx_asm.h new file mode 100644 index 000000000000..2eaaeca9cf1d --- /dev/null +++ b/tools/testing/selftests/powerpc/include/vmx_asm.h @@ -0,0 +1,96 @@ +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "basic_asm.h" + +/* POS MUST BE 16 ALIGNED! */ +#define PUSH_VMX(pos,reg) \ + li reg,pos; \ + stvx v20,reg,%r1; \ + addi reg,reg,16; \ + stvx v21,reg,%r1; \ + addi reg,reg,16; \ + stvx v22,reg,%r1; \ + addi reg,reg,16; \ + stvx v23,reg,%r1; \ + addi reg,reg,16; \ + stvx v24,reg,%r1; \ + addi reg,reg,16; \ + stvx v25,reg,%r1; \ + addi reg,reg,16; \ + stvx v26,reg,%r1; \ + addi reg,reg,16; \ + stvx v27,reg,%r1; \ + addi reg,reg,16; \ + stvx v28,reg,%r1; \ + addi reg,reg,16; \ + stvx v29,reg,%r1; \ + addi reg,reg,16; \ + stvx v30,reg,%r1; \ + addi reg,reg,16; \ + stvx v31,reg,%r1; + +/* POS MUST BE 16 ALIGNED! 
*/ +#define POP_VMX(pos,reg) \ + li reg,pos; \ + lvx v20,reg,%r1; \ + addi reg,reg,16; \ + lvx v21,reg,%r1; \ + addi reg,reg,16; \ + lvx v22,reg,%r1; \ + addi reg,reg,16; \ + lvx v23,reg,%r1; \ + addi reg,reg,16; \ + lvx v24,reg,%r1; \ + addi reg,reg,16; \ + lvx v25,reg,%r1; \ + addi reg,reg,16; \ + lvx v26,reg,%r1; \ + addi reg,reg,16; \ + lvx v27,reg,%r1; \ + addi reg,reg,16; \ + lvx v28,reg,%r1; \ + addi reg,reg,16; \ + lvx v29,reg,%r1; \ + addi reg,reg,16; \ + lvx v30,reg,%r1; \ + addi reg,reg,16; \ + lvx v31,reg,%r1; + +/* + * Careful this will 'clobber' vmx (by design) + * Don't call this from C + */ +FUNC_START(load_vmx) + li r5,0 + lvx v20,r5,r3 + addi r5,r5,16 + lvx v21,r5,r3 + addi r5,r5,16 + lvx v22,r5,r3 + addi r5,r5,16 + lvx v23,r5,r3 + addi r5,r5,16 + lvx v24,r5,r3 + addi r5,r5,16 + lvx v25,r5,r3 + addi r5,r5,16 + lvx v26,r5,r3 + addi r5,r5,16 + lvx v27,r5,r3 + addi r5,r5,16 + lvx v28,r5,r3 + addi r5,r5,16 + lvx v29,r5,r3 + addi r5,r5,16 + lvx v30,r5,r3 + addi r5,r5,16 + lvx v31,r5,r3 + blr +FUNC_END(load_vmx) diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h new file mode 100644 index 000000000000..d828bfb6ef2d --- /dev/null +++ b/tools/testing/selftests/powerpc/include/vsx_asm.h @@ -0,0 +1,71 @@ +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "basic_asm.h" + +/* + * Careful this will 'clobber' vsx (by design), VSX are always + * volatile though so unlike vmx this isn't so much of an issue + * Still should avoid calling from C + */ +FUNC_START(load_vsx) + li r5,0 + lxvx vs20,r5,r3 + addi r5,r5,16 + lxvx vs21,r5,r3 + addi r5,r5,16 + lxvx vs22,r5,r3 + addi r5,r5,16 + lxvx vs23,r5,r3 + addi r5,r5,16 + lxvx vs24,r5,r3 + addi r5,r5,16 + lxvx vs25,r5,r3 + addi r5,r5,16 + lxvx vs26,r5,r3 + addi r5,r5,16 + lxvx vs27,r5,r3 + addi r5,r5,16 + lxvx vs28,r5,r3 + addi r5,r5,16 + lxvx vs29,r5,r3 + addi r5,r5,16 + lxvx vs30,r5,r3 + addi r5,r5,16 + lxvx vs31,r5,r3 + blr +FUNC_END(load_vsx) + +FUNC_START(store_vsx) + li r5,0 + stxvx vs20,r5,r3 + addi r5,r5,16 + stxvx vs21,r5,r3 + addi r5,r5,16 + stxvx vs22,r5,r3 + addi r5,r5,16 + stxvx vs23,r5,r3 + addi r5,r5,16 + stxvx vs24,r5,r3 + addi r5,r5,16 + stxvx vs25,r5,r3 + addi r5,r5,16 + stxvx vs26,r5,r3 + addi r5,r5,16 + stxvx vs27,r5,r3 + addi r5,r5,16 + stxvx vs28,r5,r3 + addi r5,r5,16 + stxvx vs29,r5,r3 + addi r5,r5,16 + stxvx vs30,r5,r3 + addi r5,r5,16 + stxvx vs31,r5,r3 + blr +FUNC_END(store_vsx) diff --git a/tools/testing/selftests/powerpc/instructions.h b/tools/testing/selftests/powerpc/instructions.h deleted file mode 100644 index 0fb0bd3b28c9..000000000000 --- a/tools/testing/selftests/powerpc/instructions.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _SELFTESTS_POWERPC_INSTRUCTIONS_H -#define _SELFTESTS_POWERPC_INSTRUCTIONS_H - -#include -#include - -/* This defines the "copy" instruction from Power ISA 3.0 Book II, section 4.4. 
*/ -#define __COPY(RA, RB, L) \ - (0x7c00060c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10)) -#define COPY(RA, RB, L) \ - .long __COPY((RA), (RB), (L)) - -static inline void copy(void *i) -{ - asm volatile(str(COPY(0, %0, 0))";" - : - : "b" (i) - : "memory" - ); -} - -static inline void copy_first(void *i) -{ - asm volatile(str(COPY(0, %0, 1))";" - : - : "b" (i) - : "memory" - ); -} - -/* This defines the "paste" instruction from Power ISA 3.0 Book II, section 4.4. */ -#define __PASTE(RA, RB, L, RC) \ - (0x7c00070c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10) | (RC) << (31-31)) -#define PASTE(RA, RB, L, RC) \ - .long __PASTE((RA), (RB), (L), (RC)) - -static inline int paste(void *i) -{ - int cr; - - asm volatile(str(PASTE(0, %1, 0, 0))";" - "mfcr %0;" - : "=r" (cr) - : "b" (i) - : "memory" - ); - return cr; -} - -static inline int paste_last(void *i) -{ - int cr; - - asm volatile(str(PASTE(0, %1, 1, 1))";" - "mfcr %0;" - : "=r" (cr) - : "b" (i) - : "memory" - ); - return cr; -} - -#define PPC_INST_COPY __COPY(0, 0, 0) -#define PPC_INST_COPY_FIRST __COPY(0, 0, 1) -#define PPC_INST_PASTE __PASTE(0, 0, 0, 0) -#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1) - -#endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */ diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S index 241f067a510f..8a04bb117b69 100644 --- a/tools/testing/selftests/powerpc/math/fpu_asm.S +++ b/tools/testing/selftests/powerpc/math/fpu_asm.S @@ -7,8 +7,8 @@ * 2 of the License, or (at your option) any later version. */ -#include "../basic_asm.h" -#include "../fpu_asm.h" +#include "basic_asm.h" +#include "fpu_asm.h" FUNC_START(check_fpu) mr r4,r3 diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S index fd74da488625..cb1e5ae1be99 100644 --- a/tools/testing/selftests/powerpc/math/vmx_asm.S +++ b/tools/testing/selftests/powerpc/math/vmx_asm.S @@ -7,8 +7,8 @@ * 2 of the License, or (at your option) any later version. */ -#include "../basic_asm.h" -#include "../vmx_asm.h" +#include "basic_asm.h" +#include "vmx_asm.h" # Should be safe from C, only touches r4, r5 and v0,v1,v2 FUNC_START(check_vmx) diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S index a110dd882d5e..8f431f6abc49 100644 --- a/tools/testing/selftests/powerpc/math/vsx_asm.S +++ b/tools/testing/selftests/powerpc/math/vsx_asm.S @@ -7,8 +7,8 @@ * 2 of the License, or (at your option) any later version. */ -#include "../basic_asm.h" -#include "../vsx_asm.h" +#include "basic_asm.h" +#include "vsx_asm.h" #long check_vsx(vector int *r3); #This function wraps storeing VSX regs to the end of an array and a diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h deleted file mode 100644 index bb843b2d9bac..000000000000 --- a/tools/testing/selftests/powerpc/reg.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2014, Michael Ellerman, IBM Corp. - * Licensed under GPLv2. 
- */ - -#ifndef _SELFTESTS_POWERPC_REG_H -#define _SELFTESTS_POWERPC_REG_H - -#define __stringify_1(x) #x -#define __stringify(x) __stringify_1(x) - -#define mfspr(rn) ({unsigned long rval; \ - asm volatile("mfspr %0," _str(rn) \ - : "=r" (rval)); rval; }) -#define mtspr(rn, v) asm volatile("mtspr " _str(rn) ",%0" : \ - : "r" ((unsigned long)(v)) \ - : "memory") - -#define mb() asm volatile("sync" : : : "memory"); - -#define SPRN_MMCR2 769 -#define SPRN_MMCRA 770 -#define SPRN_MMCR0 779 -#define MMCR0_PMAO 0x00000080 -#define MMCR0_PMAE 0x04000000 -#define MMCR0_FC 0x80000000 -#define SPRN_EBBHR 804 -#define SPRN_EBBRR 805 -#define SPRN_BESCR 806 /* Branch event status & control register */ -#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */ -#define SPRN_BESCRSU 801 /* Branch event status & control set upper */ -#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */ -#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */ - -#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */ -#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */ - -#define SPRN_PMC1 771 -#define SPRN_PMC2 772 -#define SPRN_PMC3 773 -#define SPRN_PMC4 774 -#define SPRN_PMC5 775 -#define SPRN_PMC6 776 - -#define SPRN_SIAR 780 -#define SPRN_SDAR 781 -#define SPRN_SIER 768 - -#define SPRN_TEXASR 0x82 /* Transaction Exception and Status Register */ -#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ -#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ -#define SPRN_TAR 0x32f /* Target Address Register */ - -#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */ -#define SPRN_DSCR 0x03 /* Data Stream Control Register */ -#define SPRN_PPR 896 /* Program Priority Register */ - -/* TEXASR register bits */ -#define TEXASR_FC 0xFE00000000000000 -#define TEXASR_FP 0x0100000000000000 -#define TEXASR_DA 0x0080000000000000 -#define TEXASR_NO 0x0040000000000000 -#define TEXASR_FO 0x0020000000000000 -#define TEXASR_SIC 0x0010000000000000 -#define TEXASR_NTC 0x0008000000000000 -#define TEXASR_TC 0x0004000000000000 -#define TEXASR_TIC 0x0002000000000000 -#define TEXASR_IC 0x0001000000000000 -#define TEXASR_IFC 0x0000800000000000 -#define TEXASR_ABT 0x0000000100000000 -#define TEXASR_SPD 0x0000000080000000 -#define TEXASR_HV 0x0000000020000000 -#define TEXASR_PR 0x0000000010000000 -#define TEXASR_FS 0x0000000008000000 -#define TEXASR_TE 0x0000000004000000 -#define TEXASR_ROT 0x0000000002000000 - -/* Vector Instructions */ -#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ - ((rb) << 11) | (((xs) >> 5))) -#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) -#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) - -#endif /* _SELFTESTS_POWERPC_REG_H */ diff --git a/tools/testing/selftests/powerpc/signal/signal.S b/tools/testing/selftests/powerpc/signal/signal.S index 7043d521df0a..322f2f1fc327 100644 --- a/tools/testing/selftests/powerpc/signal/signal.S +++ b/tools/testing/selftests/powerpc/signal/signal.S @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include "../basic_asm.h" +#include "basic_asm.h" /* long signal_self(pid_t pid, int sig); */ FUNC_START(signal_self) diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c index 17417dd70708..30b1222380ca 100644 --- a/tools/testing/selftests/powerpc/stringloops/memcmp.c +++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c @@ -1,7 +1,7 @@ #include #include #include -#include "../utils.h" +#include "utils.h" #define SIZE 256 #define ITERATIONS 10000 diff --git a/tools/testing/selftests/powerpc/subunit.h b/tools/testing/selftests/powerpc/subunit.h deleted file mode 100644 index 9c6c4e901ab6..000000000000 --- a/tools/testing/selftests/powerpc/subunit.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2013, Michael Ellerman, IBM Corp. - * Licensed under GPLv2. - */ - -#ifndef _SELFTESTS_POWERPC_SUBUNIT_H -#define _SELFTESTS_POWERPC_SUBUNIT_H - -static inline void test_start(char *name) -{ - printf("test: %s\n", name); -} - -static inline void test_failure_detail(char *name, char *detail) -{ - printf("failure: %s [%s]\n", name, detail); -} - -static inline void test_failure(char *name) -{ - printf("failure: %s\n", name); -} - -static inline void test_error(char *name) -{ - printf("error: %s\n", name); -} - -static inline void test_skip(char *name) -{ - printf("skip: %s\n", name); -} - -static inline void test_success(char *name) -{ - printf("success: %s\n", name); -} - -static inline void test_finish(char *name, int status) -{ - if (status) - test_failure(name); - else - test_success(name); -} - -static inline void test_set_git_version(char *value) -{ - printf("tags: git_version:%s\n", value); -} - -#endif /* _SELFTESTS_POWERPC_SUBUNIT_H */ diff --git a/tools/testing/selftests/powerpc/tm/tm-signal.S b/tools/testing/selftests/powerpc/tm/tm-signal.S index 4e13e8b3a96f..506a4ebaf3ae 100644 --- a/tools/testing/selftests/powerpc/tm/tm-signal.S +++ b/tools/testing/selftests/powerpc/tm/tm-signal.S @@ -7,11 +7,11 @@ * 2 of the License, or (at your option) any later version. */ -#include "../basic_asm.h" -#include "../gpr_asm.h" -#include "../fpu_asm.h" -#include "../vmx_asm.h" -#include "../vsx_asm.h" +#include "basic_asm.h" +#include "gpr_asm.h" +#include "fpu_asm.h" +#include "vmx_asm.h" +#include "vsx_asm.h" /* * Large caveat here being that the caller cannot expect the diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h index 2c8da74304e7..0ffff04433c5 100644 --- a/tools/testing/selftests/powerpc/tm/tm.h +++ b/tools/testing/selftests/powerpc/tm/tm.h @@ -10,7 +10,7 @@ #include #include -#include "../utils.h" +#include "utils.h" static inline bool have_htm(void) { diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h deleted file mode 100644 index 53405e8a52ab..000000000000 --- a/tools/testing/selftests/powerpc/utils.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2013, Michael Ellerman, IBM Corp. - * Licensed under GPLv2. - */ - -#ifndef _SELFTESTS_POWERPC_UTILS_H -#define _SELFTESTS_POWERPC_UTILS_H - -#define __cacheline_aligned __attribute__((aligned(128))) - -#include -#include -#include -#include "reg.h" - -/* Avoid headaches with PRI?64 - just use %ll? 
always */ -typedef unsigned long long u64; -typedef signed long long s64; - -/* Just for familiarity */ -typedef uint32_t u32; -typedef uint16_t u16; -typedef uint8_t u8; - -void test_harness_set_timeout(uint64_t time); -int test_harness(int (test_function)(void), char *name); -extern void *get_auxv_entry(int type); -int pick_online_cpu(void); - -static inline bool have_hwcap(unsigned long ftr) -{ - return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr; -} - -#ifdef AT_HWCAP2 -static inline bool have_hwcap2(unsigned long ftr2) -{ - return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2; -} -#else -static inline bool have_hwcap2(unsigned long ftr2) -{ - return false; -} -#endif - -/* Yes, this is evil */ -#define FAIL_IF(x) \ -do { \ - if ((x)) { \ - fprintf(stderr, \ - "[FAIL] Test FAILED on line %d\n", __LINE__); \ - return 1; \ - } \ -} while (0) - -/* The test harness uses this, yes it's gross */ -#define MAGIC_SKIP_RETURN_VALUE 99 - -#define SKIP_IF(x) \ -do { \ - if ((x)) { \ - fprintf(stderr, \ - "[SKIP] Test skipped on line %d\n", __LINE__); \ - return MAGIC_SKIP_RETURN_VALUE; \ - } \ -} while (0) - -#define _str(s) #s -#define str(s) _str(s) - -/* POWER9 feature */ -#ifndef PPC_FEATURE2_ARCH_3_00 -#define PPC_FEATURE2_ARCH_3_00 0x00800000 -#endif - -#endif /* _SELFTESTS_POWERPC_UTILS_H */ diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/vmx_asm.h deleted file mode 100644 index 2eaaeca9cf1d..000000000000 --- a/tools/testing/selftests/powerpc/vmx_asm.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2015, Cyril Bur, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include "basic_asm.h" - -/* POS MUST BE 16 ALIGNED! */ -#define PUSH_VMX(pos,reg) \ - li reg,pos; \ - stvx v20,reg,%r1; \ - addi reg,reg,16; \ - stvx v21,reg,%r1; \ - addi reg,reg,16; \ - stvx v22,reg,%r1; \ - addi reg,reg,16; \ - stvx v23,reg,%r1; \ - addi reg,reg,16; \ - stvx v24,reg,%r1; \ - addi reg,reg,16; \ - stvx v25,reg,%r1; \ - addi reg,reg,16; \ - stvx v26,reg,%r1; \ - addi reg,reg,16; \ - stvx v27,reg,%r1; \ - addi reg,reg,16; \ - stvx v28,reg,%r1; \ - addi reg,reg,16; \ - stvx v29,reg,%r1; \ - addi reg,reg,16; \ - stvx v30,reg,%r1; \ - addi reg,reg,16; \ - stvx v31,reg,%r1; - -/* POS MUST BE 16 ALIGNED! 
*/ -#define POP_VMX(pos,reg) \ - li reg,pos; \ - lvx v20,reg,%r1; \ - addi reg,reg,16; \ - lvx v21,reg,%r1; \ - addi reg,reg,16; \ - lvx v22,reg,%r1; \ - addi reg,reg,16; \ - lvx v23,reg,%r1; \ - addi reg,reg,16; \ - lvx v24,reg,%r1; \ - addi reg,reg,16; \ - lvx v25,reg,%r1; \ - addi reg,reg,16; \ - lvx v26,reg,%r1; \ - addi reg,reg,16; \ - lvx v27,reg,%r1; \ - addi reg,reg,16; \ - lvx v28,reg,%r1; \ - addi reg,reg,16; \ - lvx v29,reg,%r1; \ - addi reg,reg,16; \ - lvx v30,reg,%r1; \ - addi reg,reg,16; \ - lvx v31,reg,%r1; - -/* - * Careful this will 'clobber' vmx (by design) - * Don't call this from C - */ -FUNC_START(load_vmx) - li r5,0 - lvx v20,r5,r3 - addi r5,r5,16 - lvx v21,r5,r3 - addi r5,r5,16 - lvx v22,r5,r3 - addi r5,r5,16 - lvx v23,r5,r3 - addi r5,r5,16 - lvx v24,r5,r3 - addi r5,r5,16 - lvx v25,r5,r3 - addi r5,r5,16 - lvx v26,r5,r3 - addi r5,r5,16 - lvx v27,r5,r3 - addi r5,r5,16 - lvx v28,r5,r3 - addi r5,r5,16 - lvx v29,r5,r3 - addi r5,r5,16 - lvx v30,r5,r3 - addi r5,r5,16 - lvx v31,r5,r3 - blr -FUNC_END(load_vmx) diff --git a/tools/testing/selftests/powerpc/vsx_asm.h b/tools/testing/selftests/powerpc/vsx_asm.h deleted file mode 100644 index d828bfb6ef2d..000000000000 --- a/tools/testing/selftests/powerpc/vsx_asm.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2015, Cyril Bur, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include "basic_asm.h" - -/* - * Careful this will 'clobber' vsx (by design), VSX are always - * volatile though so unlike vmx this isn't so much of an issue - * Still should avoid calling from C - */ -FUNC_START(load_vsx) - li r5,0 - lxvx vs20,r5,r3 - addi r5,r5,16 - lxvx vs21,r5,r3 - addi r5,r5,16 - lxvx vs22,r5,r3 - addi r5,r5,16 - lxvx vs23,r5,r3 - addi r5,r5,16 - lxvx vs24,r5,r3 - addi r5,r5,16 - lxvx vs25,r5,r3 - addi r5,r5,16 - lxvx vs26,r5,r3 - addi r5,r5,16 - lxvx vs27,r5,r3 - addi r5,r5,16 - lxvx vs28,r5,r3 - addi r5,r5,16 - lxvx vs29,r5,r3 - addi r5,r5,16 - lxvx vs30,r5,r3 - addi r5,r5,16 - lxvx vs31,r5,r3 - blr -FUNC_END(load_vsx) - -FUNC_START(store_vsx) - li r5,0 - stxvx vs20,r5,r3 - addi r5,r5,16 - stxvx vs21,r5,r3 - addi r5,r5,16 - stxvx vs22,r5,r3 - addi r5,r5,16 - stxvx vs23,r5,r3 - addi r5,r5,16 - stxvx vs24,r5,r3 - addi r5,r5,16 - stxvx vs25,r5,r3 - addi r5,r5,16 - stxvx vs26,r5,r3 - addi r5,r5,16 - stxvx vs27,r5,r3 - addi r5,r5,16 - stxvx vs28,r5,r3 - addi r5,r5,16 - stxvx vs29,r5,r3 - addi r5,r5,16 - stxvx vs30,r5,r3 - addi r5,r5,16 - stxvx vs31,r5,r3 - blr -FUNC_END(store_vsx) -- cgit From f666ad413db6f7fbfaeec6b11ed6f8e4b60dda38 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:52 +0800 Subject: selftests/powerpc: Add ptrace tests for GPR/FPR registers This patch adds ptrace interface test for GPR/FPR registers. This adds ptrace interface based helper functions related to GPR/FPR access and some assembly helper functions related to GPR/FPR registers. 
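For orientation, the core ptrace flow these helpers wrap is small: attach to the child, wait for it to stop, then fetch the GPRs with PTRACE_GETREGS and the FPRs with PTRACE_GETFPREGS. The following is a minimal, self-contained sketch of that flow, not code from this patch; dump_child_regs() and its error handling are illustrative names only, and it assumes a powerpc asm/ptrace.h that provides struct pt_regs:

/*
 * Sketch only: the minimal attach/read cycle that trace_gpr() builds on.
 * Names and error handling here are illustrative, not part of the patch.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <asm/ptrace.h>		/* struct pt_regs (powerpc) */

/* Same layout the selftest's struct fpr_regs uses: 32 FPRs + FPSCR */
struct fpr_regs_sketch {
	unsigned long fpr[32];
	unsigned long fpscr;
};

static int dump_child_regs(pid_t child)
{
	struct pt_regs regs;
	struct fpr_regs_sketch fpr;
	int i;

	if (ptrace(PTRACE_ATTACH, child, NULL, NULL))
		return -1;
	if (waitpid(child, NULL, 0) != child)	/* wait until the child stops */
		return -1;

	if (ptrace(PTRACE_GETREGS, child, NULL, &regs))
		return -1;
	if (ptrace(PTRACE_GETFPREGS, child, NULL, &fpr))
		return -1;

	/* r14-r31 are the non-volatile GPRs the test loads and checks */
	for (i = 14; i < 32; i++)
		printf("r%d = 0x%lx\n", i, regs.gpr[i]);
	for (i = 0; i < 32; i++)
		printf("f%d = 0x%lx\n", i, fpr.fpr[i]);

	return ptrace(PTRACE_DETACH, child, NULL, NULL);
}

The real helpers in ptrace.h below follow the same calls but return the selftest's TEST_PASS/TEST_FAIL codes and copy only the non-volatile range r14-r31 into caller-supplied buffers.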
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo [mpe: Add #defines for the new note types when headers don't define them] Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/Makefile | 3 +- tools/testing/selftests/powerpc/include/reg.h | 61 ++++ tools/testing/selftests/powerpc/lib/reg.S | 132 ++++++++ tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 12 + .../testing/selftests/powerpc/ptrace/ptrace-gpr.c | 123 +++++++ .../testing/selftests/powerpc/ptrace/ptrace-gpr.h | 74 ++++ tools/testing/selftests/powerpc/ptrace/ptrace.h | 376 +++++++++++++++++++++ 8 files changed, 781 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/lib/reg.S create mode 100644 tools/testing/selftests/powerpc/ptrace/.gitignore create mode 100644 tools/testing/selftests/powerpc/ptrace/Makefile create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace.h diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 561afa1921b9..c2c4211ba58b 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -26,7 +26,8 @@ SUB_DIRS = alignment \ syscalls \ tm \ vphn \ - math + math \ + ptrace endif diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h index bb843b2d9bac..4afdebcce4cd 100644 --- a/tools/testing/selftests/powerpc/include/reg.h +++ b/tools/testing/selftests/powerpc/include/reg.h @@ -81,4 +81,65 @@ #define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) #define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) +#define ASM_LOAD_GPR_IMMED(_asm_symbol_name_immed) \ + "li 14, %[" #_asm_symbol_name_immed "];" \ + "li 15, %[" #_asm_symbol_name_immed "];" \ + "li 16, %[" #_asm_symbol_name_immed "];" \ + "li 17, %[" #_asm_symbol_name_immed "];" \ + "li 18, %[" #_asm_symbol_name_immed "];" \ + "li 19, %[" #_asm_symbol_name_immed "];" \ + "li 20, %[" #_asm_symbol_name_immed "];" \ + "li 21, %[" #_asm_symbol_name_immed "];" \ + "li 22, %[" #_asm_symbol_name_immed "];" \ + "li 23, %[" #_asm_symbol_name_immed "];" \ + "li 24, %[" #_asm_symbol_name_immed "];" \ + "li 25, %[" #_asm_symbol_name_immed "];" \ + "li 26, %[" #_asm_symbol_name_immed "];" \ + "li 27, %[" #_asm_symbol_name_immed "];" \ + "li 28, %[" #_asm_symbol_name_immed "];" \ + "li 29, %[" #_asm_symbol_name_immed "];" \ + "li 30, %[" #_asm_symbol_name_immed "];" \ + "li 31, %[" #_asm_symbol_name_immed "];" + +#define ASM_LOAD_FPR_SINGLE_PRECISION(_asm_symbol_name_addr) \ + "lfs 0, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 1, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 2, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 3, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 4, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 5, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 6, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 7, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 8, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 9, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 10, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 11, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 12, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 13, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 14, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 15, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 16, 0(%[" 
#_asm_symbol_name_addr "]);" \ + "lfs 17, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 18, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 19, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 20, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 21, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 22, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 23, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 24, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 25, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 26, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 27, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 28, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 29, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 30, 0(%[" #_asm_symbol_name_addr "]);" \ + "lfs 31, 0(%[" #_asm_symbol_name_addr "]);" + +#ifndef __ASSEMBLER__ +void store_gpr(unsigned long *addr); +void load_gpr(unsigned long *addr); +void load_fpr_single_precision(float *addr); +void store_fpr_single_precision(float *addr); +#endif /* end of __ASSEMBLER__ */ + #endif /* _SELFTESTS_POWERPC_REG_H */ diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S new file mode 100644 index 000000000000..b6aee2f79454 --- /dev/null +++ b/tools/testing/selftests/powerpc/lib/reg.S @@ -0,0 +1,132 @@ +/* + * test helper assembly functions + * + * Copyright (C) 2016 Simon Guo, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include "reg.h" + + +/* Non volatile GPR - unsigned long buf[18] */ +FUNC_START(load_gpr) + ld 14, 0*8(3) + ld 15, 1*8(3) + ld 16, 2*8(3) + ld 17, 3*8(3) + ld 18, 4*8(3) + ld 19, 5*8(3) + ld 20, 6*8(3) + ld 21, 7*8(3) + ld 22, 8*8(3) + ld 23, 9*8(3) + ld 24, 10*8(3) + ld 25, 11*8(3) + ld 26, 12*8(3) + ld 27, 13*8(3) + ld 28, 14*8(3) + ld 29, 15*8(3) + ld 30, 16*8(3) + ld 31, 17*8(3) + blr +FUNC_END(load_gpr) + +FUNC_START(store_gpr) + std 14, 0*8(3) + std 15, 1*8(3) + std 16, 2*8(3) + std 17, 3*8(3) + std 18, 4*8(3) + std 19, 5*8(3) + std 20, 6*8(3) + std 21, 7*8(3) + std 22, 8*8(3) + std 23, 9*8(3) + std 24, 10*8(3) + std 25, 11*8(3) + std 26, 12*8(3) + std 27, 13*8(3) + std 28, 14*8(3) + std 29, 15*8(3) + std 30, 16*8(3) + std 31, 17*8(3) + blr +FUNC_END(store_gpr) + +/* Single Precision Float - float buf[32] */ +FUNC_START(load_fpr_single_precision) + lfs 0, 0*4(3) + lfs 1, 1*4(3) + lfs 2, 2*4(3) + lfs 3, 3*4(3) + lfs 4, 4*4(3) + lfs 5, 5*4(3) + lfs 6, 6*4(3) + lfs 7, 7*4(3) + lfs 8, 8*4(3) + lfs 9, 9*4(3) + lfs 10, 10*4(3) + lfs 11, 11*4(3) + lfs 12, 12*4(3) + lfs 13, 13*4(3) + lfs 14, 14*4(3) + lfs 15, 15*4(3) + lfs 16, 16*4(3) + lfs 17, 17*4(3) + lfs 18, 18*4(3) + lfs 19, 19*4(3) + lfs 20, 20*4(3) + lfs 21, 21*4(3) + lfs 22, 22*4(3) + lfs 23, 23*4(3) + lfs 24, 24*4(3) + lfs 25, 25*4(3) + lfs 26, 26*4(3) + lfs 27, 27*4(3) + lfs 28, 28*4(3) + lfs 29, 29*4(3) + lfs 30, 30*4(3) + lfs 31, 31*4(3) + blr +FUNC_END(load_fpr_single_precision) + +/* Single Precision Float - float buf[32] */ +FUNC_START(store_fpr_single_precision) + stfs 0, 0*4(3) + stfs 1, 1*4(3) + stfs 2, 2*4(3) + stfs 3, 3*4(3) + stfs 4, 4*4(3) + stfs 5, 5*4(3) + stfs 6, 6*4(3) + stfs 7, 7*4(3) + stfs 8, 8*4(3) + stfs 9, 9*4(3) + stfs 10, 10*4(3) + stfs 11, 11*4(3) + stfs 12, 12*4(3) + stfs 13, 13*4(3) + stfs 14, 14*4(3) + stfs 15, 15*4(3) + stfs 16, 16*4(3) + stfs 17, 17*4(3) + stfs 18, 18*4(3) + stfs 19, 19*4(3) + stfs 20, 20*4(3) + 
stfs 21, 21*4(3) + stfs 22, 22*4(3) + stfs 23, 23*4(3) + stfs 24, 24*4(3) + stfs 25, 25*4(3) + stfs 26, 26*4(3) + stfs 27, 27*4(3) + stfs 28, 28*4(3) + stfs 29, 29*4(3) + stfs 30, 30*4(3) + stfs 31, 31*4(3) + blr +FUNC_END(store_fpr_single_precision) diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore new file mode 100644 index 000000000000..6e060041d7c8 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -0,0 +1 @@ +ptrace-gpr \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile new file mode 100644 index 000000000000..110377268f6d --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -0,0 +1,12 @@ +TEST_PROGS := ptrace-gpr + +include ../../lib.mk + +all: $(TEST_PROGS) + +CFLAGS += -m64 -I../../../../../usr/include + +$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h + +clean: + rm -f $(TEST_PROGS) *.o diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c new file mode 100644 index 000000000000..0b4ebcc2f485 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c @@ -0,0 +1,123 @@ +/* + * Ptrace test for GPR/FPR registers + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include "ptrace.h" +#include "ptrace-gpr.h" +#include "reg.h" + +/* Tracer and Tracee Shared Data */ +int shm_id; +int *cptr, *pptr; + +float a = FPR_1; +float b = FPR_2; +float c = FPR_3; + +void gpr(void) +{ + unsigned long gpr_buf[18]; + float fpr_buf[32]; + + cptr = (int *)shmat(shm_id, NULL, 0); + + asm __volatile__( + ASM_LOAD_GPR_IMMED(gpr_1) + ASM_LOAD_FPR_SINGLE_PRECISION(flt_1) + : + : [gpr_1]"i"(GPR_1), [flt_1] "r" (&a) + : "memory", "r6", "r7", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15", "r16", "r17", + "r18", "r19", "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29", "r30", "r31" + ); + + cptr[1] = 1; + + while (!cptr[0]) + asm volatile("" : : : "memory"); + + shmdt((void *)cptr); + store_gpr(gpr_buf); + store_fpr_single_precision(fpr_buf); + + if (validate_gpr(gpr_buf, GPR_3)) + exit(1); + + if (validate_fpr_float(fpr_buf, c)) + exit(1); + + exit(0); +} + +int trace_gpr(pid_t child) +{ + unsigned long gpr[18]; + unsigned long fpr[32]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_gpr(child, gpr)); + FAIL_IF(validate_gpr(gpr, GPR_1)); + FAIL_IF(show_fpr(child, fpr)); + FAIL_IF(validate_fpr(fpr, FPR_1_REP)); + FAIL_IF(write_gpr(child, GPR_3)); + FAIL_IF(write_fpr(child, FPR_3_REP)); + FAIL_IF(stop_trace(child)); + + return TEST_PASS; +} + +int ptrace_gpr(void) +{ + pid_t pid; + int ret, status; + + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + if (pid == 0) + gpr(); + + if (pid) { + pptr = (int *)shmat(shm_id, NULL, 0); + while (!pptr[1]) + asm volatile("" : : : "memory"); + + ret = trace_gpr(pid); + if (ret) { + kill(pid, SIGTERM); + shmdt((void *)pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + pptr[0] = 1; + shmdt((void *)pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) 
{ + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_gpr, "ptrace_gpr"); +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h new file mode 100644 index 000000000000..e30fef63824c --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#define GPR_1 1 +#define GPR_2 2 +#define GPR_3 3 +#define GPR_4 4 + +#define FPR_1 0.001 +#define FPR_2 0.002 +#define FPR_3 0.003 +#define FPR_4 0.004 + +#define FPR_1_REP 0x3f50624de0000000 +#define FPR_2_REP 0x3f60624de0000000 +#define FPR_3_REP 0x3f689374c0000000 +#define FPR_4_REP 0x3f70624de0000000 + +/* Buffer must have 18 elements */ +int validate_gpr(unsigned long *gpr, unsigned long val) +{ + int i, found = 1; + + for (i = 0; i < 18; i++) { + if (gpr[i] != val) { + printf("GPR[%d]: %lx Expected: %lx\n", + i+14, gpr[i], val); + found = 0; + } + } + + if (!found) + return TEST_FAIL; + return TEST_PASS; +} + +/* Buffer must have 32 elements */ +int validate_fpr(unsigned long *fpr, unsigned long val) +{ + int i, found = 1; + + for (i = 0; i < 32; i++) { + if (fpr[i] != val) { + printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val); + found = 0; + } + } + + if (!found) + return TEST_FAIL; + return TEST_PASS; +} + +/* Buffer must have 32 elements */ +int validate_fpr_float(float *fpr, float val) +{ + int i, found = 1; + + for (i = 0; i < 32; i++) { + if (fpr[i] != val) { + printf("FPR[%d]: %f Expected: %f\n", i, fpr[i], val); + found = 0; + } + } + + if (!found) + return TEST_FAIL; + return TEST_PASS; +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h new file mode 100644 index 000000000000..9ee89eb06df9 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h @@ -0,0 +1,376 @@ +/* + * Ptrace interface test helper functions + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "reg.h" +#include "utils.h" + +#define TEST_PASS 0 +#define TEST_FAIL 1 + +struct fpr_regs { + unsigned long fpr[32]; + unsigned long fpscr; +}; + + +#ifndef NT_PPC_TAR +#define NT_PPC_TAR 0x103 +#define NT_PPC_PPR 0x104 +#define NT_PPC_DSCR 0x105 +#define NT_PPC_EBB 0x106 +#define NT_PPC_PMU 0x107 +#define NT_PPC_TM_CGPR 0x108 +#define NT_PPC_TM_CFPR 0x109 +#define NT_PPC_TM_CVMX 0x10a +#define NT_PPC_TM_CVSX 0x10b +#define NT_PPC_TM_SPR 0x10c +#define NT_PPC_TM_CTAR 0x10d +#define NT_PPC_TM_CPPR 0x10e +#define NT_PPC_TM_CDSCR 0x10f +#endif + +/* Basic ptrace operations */ +int start_trace(pid_t child) +{ + int ret; + + ret = ptrace(PTRACE_ATTACH, child, NULL, NULL); + if (ret) { + perror("ptrace(PTRACE_ATTACH) failed"); + return TEST_FAIL; + } + ret = waitpid(child, NULL, 0); + if (ret != child) { + perror("waitpid() failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int stop_trace(pid_t child) +{ + int ret; + + ret = ptrace(PTRACE_DETACH, child, NULL, NULL); + if (ret) { + perror("ptrace(PTRACE_DETACH) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int cont_trace(pid_t child) +{ + int ret; + + ret = ptrace(PTRACE_CONT, child, NULL, NULL); + if (ret) { + perror("ptrace(PTRACE_CONT) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +/* FPR */ +int show_fpr(pid_t child, unsigned long *fpr) +{ + struct fpr_regs *regs; + int ret, i; + + regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs)); + ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + if (fpr) { + for (i = 0; i < 32; i++) + fpr[i] = regs->fpr[i]; + } + return TEST_PASS; +} + +int write_fpr(pid_t child, unsigned long val) +{ + struct fpr_regs *regs; + int ret, i; + + regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs)); + ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + for (i = 0; i < 32; i++) + regs->fpr[i] = val; + + ret = ptrace(PTRACE_SETFPREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int show_ckpt_fpr(pid_t child, unsigned long *fpr) +{ + struct fpr_regs *regs; + struct iovec iov; + int ret, i; + + regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs)); + iov.iov_base = regs; + iov.iov_len = sizeof(struct fpr_regs); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + if (fpr) { + for (i = 0; i < 32; i++) + fpr[i] = regs->fpr[i]; + } + + return TEST_PASS; +} + +int write_ckpt_fpr(pid_t child, unsigned long val) +{ + struct fpr_regs *regs; + struct iovec iov; + int ret, i; + + regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs)); + iov.iov_base = regs; + iov.iov_len = sizeof(struct fpr_regs); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + for (i = 0; i < 32; i++) + regs->fpr[i] = val; + + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CFPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +/* GPR */ +int show_gpr(pid_t child, unsigned long *gpr) +{ + struct 
pt_regs *regs; + int ret, i; + + regs = (struct pt_regs *) malloc(sizeof(struct pt_regs)); + if (!regs) { + perror("malloc() failed"); + return TEST_FAIL; + } + + ret = ptrace(PTRACE_GETREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + if (gpr) { + for (i = 14; i < 32; i++) + gpr[i-14] = regs->gpr[i]; + } + + return TEST_PASS; +} + +int write_gpr(pid_t child, unsigned long val) +{ + struct pt_regs *regs; + int i, ret; + + regs = (struct pt_regs *) malloc(sizeof(struct pt_regs)); + if (!regs) { + perror("malloc() failed"); + return TEST_FAIL; + } + + ret = ptrace(PTRACE_GETREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + for (i = 14; i < 32; i++) + regs->gpr[i] = val; + + ret = ptrace(PTRACE_SETREGS, child, NULL, regs); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int show_ckpt_gpr(pid_t child, unsigned long *gpr) +{ + struct pt_regs *regs; + struct iovec iov; + int ret, i; + + regs = (struct pt_regs *) malloc(sizeof(struct pt_regs)); + if (!regs) { + perror("malloc() failed"); + return TEST_FAIL; + } + + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(struct pt_regs); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + if (gpr) { + for (i = 14; i < 32; i++) + gpr[i-14] = regs->gpr[i]; + } + + return TEST_PASS; +} + +int write_ckpt_gpr(pid_t child, unsigned long val) +{ + struct pt_regs *regs; + struct iovec iov; + int ret, i; + + regs = (struct pt_regs *) malloc(sizeof(struct pt_regs)); + if (!regs) { + perror("malloc() failed\n"); + return TEST_FAIL; + } + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(struct pt_regs); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + for (i = 14; i < 32; i++) + regs->gpr[i] = val; + + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CGPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +/* Analyse TEXASR after TM failure */ +inline unsigned long get_tfiar(void) +{ + unsigned long ret; + + asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_TFIAR)); + return ret; +} + +void analyse_texasr(unsigned long texasr) +{ + printf("TEXASR: %16lx\t", texasr); + + if (texasr & TEXASR_FP) + printf("TEXASR_FP "); + + if (texasr & TEXASR_DA) + printf("TEXASR_DA "); + + if (texasr & TEXASR_NO) + printf("TEXASR_NO "); + + if (texasr & TEXASR_FO) + printf("TEXASR_FO "); + + if (texasr & TEXASR_SIC) + printf("TEXASR_SIC "); + + if (texasr & TEXASR_NTC) + printf("TEXASR_NTC "); + + if (texasr & TEXASR_TC) + printf("TEXASR_TC "); + + if (texasr & TEXASR_TIC) + printf("TEXASR_TIC "); + + if (texasr & TEXASR_IC) + printf("TEXASR_IC "); + + if (texasr & TEXASR_IFC) + printf("TEXASR_IFC "); + + if (texasr & TEXASR_ABT) + printf("TEXASR_ABT "); + + if (texasr & TEXASR_SPD) + printf("TEXASR_SPD "); + + if (texasr & TEXASR_HV) + printf("TEXASR_HV "); + + if (texasr & TEXASR_PR) + printf("TEXASR_PR "); + + if (texasr & TEXASR_FS) + printf("TEXASR_FS "); + + if (texasr & TEXASR_TE) + printf("TEXASR_TE "); + + if (texasr & TEXASR_ROT) + printf("TEXASR_ROT "); + + printf("TFIAR :%lx\n", get_tfiar()); +} + +void store_gpr(unsigned long *addr); +void store_fpr(float *addr); -- cgit From 509fcfe57b09637a80fe0fcacd052e8f445a2274 Mon Sep 
17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:54 +0800 Subject: selftests/powerpc: Add ptrace tests for GPR/FPR registers in TM This patch adds ptrace interface test for GPR/FPR registers inside TM context. This adds ptrace interface based helper functions related to checkpointed GPR/FPR access. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 3 +- tools/testing/selftests/powerpc/ptrace/Makefile | 4 +- .../selftests/powerpc/ptrace/ptrace-tm-gpr.c | 158 +++++++++++++++++++++ 3 files changed, 162 insertions(+), 3 deletions(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 6e060041d7c8..4756bcdddc8a 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -1 +1,2 @@ -ptrace-gpr \ No newline at end of file +ptrace-gpr +ptrace-tm-gpr \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 110377268f6d..84f5de9a1b13 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,10 +1,10 @@ -TEST_PROGS := ptrace-gpr +TEST_PROGS := ptrace-gpr ptrace-tm-gpr include ../../lib.mk all: $(TEST_PROGS) -CFLAGS += -m64 -I../../../../../usr/include +CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm $(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c new file mode 100644 index 000000000000..59206b96e98a --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c @@ -0,0 +1,158 @@ +/* + * Ptrace test for GPR/FPR registers in TM context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "ptrace-gpr.h" +#include "tm.h" + +/* Tracer and Tracee Shared Data */ +int shm_id; +unsigned long *cptr, *pptr; + +float a = FPR_1; +float b = FPR_2; +float c = FPR_3; + +void tm_gpr(void) +{ + unsigned long gpr_buf[18]; + unsigned long result, texasr; + float fpr_buf[32]; + + printf("Starting the child\n"); + cptr = (unsigned long *)shmat(shm_id, NULL, 0); + +trans: + cptr[1] = 0; + asm __volatile__( + ASM_LOAD_GPR_IMMED(gpr_1) + ASM_LOAD_FPR_SINGLE_PRECISION(flt_1) + "1: ;" + "tbegin.;" + "beq 2f;" + ASM_LOAD_GPR_IMMED(gpr_2) + ASM_LOAD_FPR_SINGLE_PRECISION(flt_2) + "tsuspend.;" + "li 7, 1;" + "stw 7, 0(%[cptr1]);" + "tresume.;" + "b .;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + /* Transaction abort handler */ + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + : [res] "=r" (result), [texasr] "=r" (texasr) + : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), + [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a), + [flt_2] "r" (&b), [cptr1] "r" (&cptr[1]) + : "memory", "r7", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15", "r16", + "r17", "r18", "r19", "r20", "r21", "r22", + "r23", "r24", "r25", "r26", "r27", "r28", + "r29", "r30", "r31" + ); + + if (result) { + if (!cptr[0]) + goto trans; + + shmdt((void *)cptr); + store_gpr(gpr_buf); + store_fpr_single_precision(fpr_buf); + + if (validate_gpr(gpr_buf, GPR_3)) + exit(1); + + if (validate_fpr_float(fpr_buf, c)) + exit(1); + + exit(0); + } + shmdt((void *)cptr); + exit(1); +} + +int trace_tm_gpr(pid_t child) +{ + unsigned long gpr[18]; + unsigned long fpr[32]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_gpr(child, gpr)); + FAIL_IF(validate_gpr(gpr, GPR_2)); + FAIL_IF(show_fpr(child, fpr)); + FAIL_IF(validate_fpr(fpr, FPR_2_REP)); + FAIL_IF(show_ckpt_fpr(child, fpr)); + FAIL_IF(validate_fpr(fpr, FPR_1_REP)); + FAIL_IF(show_ckpt_gpr(child, gpr)); + FAIL_IF(validate_gpr(gpr, GPR_1)); + FAIL_IF(write_ckpt_gpr(child, GPR_3)); + FAIL_IF(write_ckpt_fpr(child, FPR_3_REP)); + + pptr[0] = 1; + FAIL_IF(stop_trace(child)); + + return TEST_PASS; +} + +int ptrace_tm_gpr(void) +{ + pid_t pid; + int ret, status; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + if (pid == 0) + tm_gpr(); + + if (pid) { + pptr = (unsigned long *)shmat(shm_id, NULL, 0); + + while (!pptr[1]) + asm volatile("" : : : "memory"); + ret = trace_tm_gpr(pid); + if (ret) { + kill(pid, SIGTERM); + return TEST_FAIL; + } + + shmdt((void *)pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_gpr, "ptrace_tm_gpr"); +} -- cgit From c7096dc0ee02ee7fd885c8010aad2c7d73d8c640 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:55 +0800 Subject: selftests/powerpc: Add ptrace tests for GPR/FPR registers in suspended TM This patch adds ptrace interface test for GPR/FPR registers inside suspended TM context. 
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 3 +- tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c | 169 +++++++++++++++++++++ 3 files changed, 172 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 4756bcdddc8a..e85470e020de 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -1,2 +1,3 @@ ptrace-gpr -ptrace-tm-gpr \ No newline at end of file +ptrace-tm-gpr +ptrace-tm-spd-gpr \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 84f5de9a1b13..d246e286fb54 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,4 +1,4 @@ -TEST_PROGS := ptrace-gpr ptrace-tm-gpr +TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c new file mode 100644 index 000000000000..327fa943c7f3 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c @@ -0,0 +1,169 @@ +/* + * Ptrace test for GPR/FPR registers in TM Suspend context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "ptrace-gpr.h" +#include "tm.h" + +/* Tracer and Tracee Shared Data */ +int shm_id; +int *cptr, *pptr; + +float a = FPR_1; +float b = FPR_2; +float c = FPR_3; +float d = FPR_4; + +__attribute__((used)) void wait_parent(void) +{ + cptr[2] = 1; + while (!cptr[1]) + asm volatile("" : : : "memory"); +} + +void tm_spd_gpr(void) +{ + unsigned long gpr_buf[18]; + unsigned long result, texasr; + float fpr_buf[32]; + + cptr = (int *)shmat(shm_id, NULL, 0); + +trans: + cptr[2] = 0; + asm __volatile__( + ASM_LOAD_GPR_IMMED(gpr_1) + ASM_LOAD_FPR_SINGLE_PRECISION(flt_1) + + "1: ;" + "tbegin.;" + "beq 2f;" + + ASM_LOAD_GPR_IMMED(gpr_2) + "tsuspend.;" + ASM_LOAD_GPR_IMMED(gpr_4) + ASM_LOAD_FPR_SINGLE_PRECISION(flt_4) + + "bl wait_parent;" + "tresume.;" + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + /* Transaction abort handler */ + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + : [res] "=r" (result), [texasr] "=r" (texasr) + : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4), + [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a), + [flt_2] "r" (&b), [flt_4] "r" (&d) + : "memory", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" + ); + + if (result) { + if (!cptr[0]) + goto trans; + + shmdt((void *)cptr); + store_gpr(gpr_buf); + store_fpr_single_precision(fpr_buf); + + if (validate_gpr(gpr_buf, GPR_3)) + exit(1); + + if (validate_fpr_float(fpr_buf, c)) + exit(1); + exit(0); + } + shmdt((void *)cptr); + exit(1); +} + +int trace_tm_spd_gpr(pid_t child) +{ + unsigned long gpr[18]; + unsigned long fpr[32]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_gpr(child, gpr)); + FAIL_IF(validate_gpr(gpr, GPR_4)); + FAIL_IF(show_fpr(child, fpr)); + FAIL_IF(validate_fpr(fpr, FPR_4_REP)); + FAIL_IF(show_ckpt_fpr(child, fpr)); + FAIL_IF(validate_fpr(fpr, FPR_1_REP)); + FAIL_IF(show_ckpt_gpr(child, gpr)); + FAIL_IF(validate_gpr(gpr, GPR_1)); + FAIL_IF(write_ckpt_gpr(child, GPR_3)); + FAIL_IF(write_ckpt_fpr(child, FPR_3_REP)); + + pptr[0] = 1; + pptr[1] = 1; + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int ptrace_tm_spd_gpr(void) +{ + pid_t pid; + int ret, status; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT); + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + tm_spd_gpr(); + + if (pid) { + pptr = (int *)shmat(shm_id, NULL, 0); + pptr[0] = 0; + pptr[1] = 0; + + while (!pptr[2]) + asm volatile("" : : : "memory"); + ret = trace_tm_spd_gpr(pid); + if (ret) { + kill(pid, SIGTERM); + shmdt((void *)pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + shmdt((void *)pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_spd_gpr, "ptrace_tm_spd_gpr"); +} -- cgit From 254dae59af6791f02bad50c70a1e5a27cb9deb8c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:56 +0800 Subject: selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR registers This patch adds ptrace interface test for TAR, PPR, DSCR registers. 
This also adds ptrace interface based helper functions related to TAR, PPR, DSCR register access. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 3 +- tools/testing/selftests/powerpc/ptrace/Makefile | 3 +- .../testing/selftests/powerpc/ptrace/ptrace-tar.c | 135 +++++++++++++++ .../testing/selftests/powerpc/ptrace/ptrace-tar.h | 50 ++++++ tools/testing/selftests/powerpc/ptrace/ptrace.h | 181 +++++++++++++++++++++ 5 files changed, 370 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tar.c create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tar.h diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index e85470e020de..ce9431343ff8 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -1,3 +1,4 @@ ptrace-gpr ptrace-tm-gpr -ptrace-tm-spd-gpr \ No newline at end of file +ptrace-tm-spd-gpr +ptrace-tar \ No newline at end of file diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index d246e286fb54..c1060ff71e15 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,4 +1,5 @@ -TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr +TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ + ptrace-tar include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c new file mode 100644 index 000000000000..f9b5069db89b --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c @@ -0,0 +1,135 @@ +/* + * Ptrace test for TAR, PPR, DSCR registers + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "ptrace-tar.h" + +/* Tracer and Tracee Shared Data */ +int shm_id; +int *cptr; +int *pptr; + +void tar(void) +{ + unsigned long reg[3]; + int ret; + + cptr = (int *)shmat(shm_id, NULL, 0); + printf("%-30s TAR: %u PPR: %lx DSCR: %u\n", + user_write, TAR_1, PPR_1, DSCR_1); + + mtspr(SPRN_TAR, TAR_1); + mtspr(SPRN_PPR, PPR_1); + mtspr(SPRN_DSCR, DSCR_1); + + cptr[2] = 1; + + /* Wait on parent */ + while (!cptr[0]) + asm volatile("" : : : "memory"); + + reg[0] = mfspr(SPRN_TAR); + reg[1] = mfspr(SPRN_PPR); + reg[2] = mfspr(SPRN_DSCR); + + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + user_read, reg[0], reg[1], reg[2]); + + /* Unblock the parent now */ + cptr[1] = 1; + shmdt((int *)cptr); + + ret = validate_tar_registers(reg, TAR_2, PPR_2, DSCR_2); + if (ret) + exit(1); + exit(0); +} + +int trace_tar(pid_t child) +{ + unsigned long reg[3]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_tar_registers(child, reg)); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + ptrace_read_running, reg[0], reg[1], reg[2]); + + FAIL_IF(validate_tar_registers(reg, TAR_1, PPR_1, DSCR_1)); + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int trace_tar_write(pid_t child) +{ + FAIL_IF(start_trace(child)); + FAIL_IF(write_tar_registers(child, TAR_2, PPR_2, DSCR_2)); + printf("%-30s TAR: %u PPR: %lx DSCR: %u\n", + ptrace_write_running, TAR_2, PPR_2, DSCR_2); + + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int ptrace_tar(void) +{ + pid_t pid; + int ret, status; + + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT); + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + tar(); + + if (pid) { + pptr = (int *)shmat(shm_id, NULL, 0); + pptr[0] = 0; + pptr[1] = 0; + + while (!pptr[2]) + asm volatile("" : : : "memory"); + ret = trace_tar(pid); + if (ret) + return ret; + + ret = trace_tar_write(pid); + if (ret) + return ret; + + /* Unblock the child now */ + pptr[0] = 1; + + /* Wait on child */ + while (!pptr[1]) + asm volatile("" : : : "memory"); + + shmdt((int *)pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_PASS; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tar, "ptrace_tar"); +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h new file mode 100644 index 000000000000..aed0aac716d2 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#define TAR_1 10 +#define TAR_2 20 +#define TAR_3 30 +#define TAR_4 40 +#define TAR_5 50 + +#define DSCR_1 100 +#define DSCR_2 200 +#define DSCR_3 300 +#define DSCR_4 400 +#define DSCR_5 500 + +#define PPR_1 0x4000000000000 /* or 31,31,31*/ +#define PPR_2 0x8000000000000 /* or 1,1,1 */ +#define PPR_3 0xc000000000000 /* or 6,6,6 */ +#define PPR_4 0x10000000000000 /* or 2,2,2 */ + +char *user_read = "[User Read (Running)]"; +char *user_write = "[User Write (Running)]"; +char *ptrace_read_running = "[Ptrace Read (Running)]"; +char *ptrace_write_running = "[Ptrace Write (Running)]"; +char *ptrace_read_ckpt = "[Ptrace Read (Checkpointed)]"; +char *ptrace_write_ckpt = "[Ptrace Write (Checkpointed)]"; + +int validate_tar_registers(unsigned long *reg, unsigned long tar, + unsigned long ppr, unsigned long dscr) +{ + int match = 1; + + if (reg[0] != tar) + match = 0; + + if (reg[1] != ppr) + match = 0; + + if (reg[2] != dscr) + match = 0; + + if (!match) + return TEST_FAIL; + return TEST_PASS; +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h index 9ee89eb06df9..fee71b8c1e75 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace.h +++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h @@ -97,6 +97,187 @@ int cont_trace(pid_t child) return TEST_PASS; } +/* TAR, PPR, DSCR */ +int show_tar_registers(pid_t child, unsigned long *out) +{ + struct iovec iov; + unsigned long *reg; + int ret; + + reg = malloc(sizeof(unsigned long)); + if (!reg) { + perror("malloc() failed"); + return TEST_FAIL; + } + iov.iov_base = (u64 *) reg; + iov.iov_len = sizeof(unsigned long); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TAR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[0] = *reg; + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_PPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[1] = *reg; + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_DSCR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[2] = *reg; + + free(reg); + return TEST_PASS; +fail: + free(reg); + return TEST_FAIL; +} + +int write_tar_registers(pid_t child, unsigned long tar, + unsigned long ppr, unsigned long dscr) +{ + struct iovec iov; + unsigned long *reg; + int ret; + + reg = malloc(sizeof(unsigned long)); + if (!reg) { + perror("malloc() failed"); + return TEST_FAIL; + } + + iov.iov_base = (u64 *) reg; + iov.iov_len = sizeof(unsigned long); + + *reg = tar; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TAR, &iov); + if (ret) { + perror("ptrace(PTRACE_SETREGSET) failed"); + goto fail; + } + + *reg = ppr; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_PPR, &iov); + if (ret) { + perror("ptrace(PTRACE_SETREGSET) failed"); + goto fail; + } + + *reg = dscr; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_DSCR, &iov); + if (ret) { + perror("ptrace(PTRACE_SETREGSET) failed"); + goto fail; + } + + free(reg); + return TEST_PASS; +fail: + free(reg); + return TEST_FAIL; +} + +int show_tm_checkpointed_state(pid_t child, unsigned long *out) +{ + struct iovec iov; + unsigned long *reg; + int ret; + + reg = malloc(sizeof(unsigned long)); + if (!reg) { + perror("malloc() failed"); + return TEST_FAIL; + } + + iov.iov_base = (u64 *) reg; + iov.iov_len = sizeof(unsigned long); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CTAR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[0] = *reg; + 
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CPPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[1] = *reg; + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CDSCR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + if (out) + out[2] = *reg; + + free(reg); + return TEST_PASS; + +fail: + free(reg); + return TEST_FAIL; +} + +int write_ckpt_tar_registers(pid_t child, unsigned long tar, + unsigned long ppr, unsigned long dscr) +{ + struct iovec iov; + unsigned long *reg; + int ret; + + reg = malloc(sizeof(unsigned long)); + if (!reg) { + perror("malloc() failed"); + return TEST_FAIL; + } + + iov.iov_base = (u64 *) reg; + iov.iov_len = sizeof(unsigned long); + + *reg = tar; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CTAR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + + *reg = ppr; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CPPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + + *reg = dscr; + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CDSCR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + goto fail; + } + + free(reg); + return TEST_PASS; +fail: + free(reg); + return TEST_FAIL; +} + /* FPR */ int show_fpr(pid_t child, unsigned long *fpr) { -- cgit From fcf73a6bd9cb2b09045307e3ae88448f18750987 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:57 +0800 Subject: selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR in TM This patch adds ptrace interface test for TAR, PPR, DSCR registers inside TM context. This also adds ptrace interface based helper functions related to checkpointed TAR, PPR, DSCR register access. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 3 +- tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../selftests/powerpc/ptrace/ptrace-tm-tar.c | 160 +++++++++++++++++++++ 3 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index ce9431343ff8..556a6e1a1773 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -1,4 +1,5 @@ ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr -ptrace-tar \ No newline at end of file +ptrace-tar +ptrace-tm-tar diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index c1060ff71e15..befa2c5bb40b 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,5 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ - ptrace-tar + ptrace-tar ptrace-tm-tar include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c new file mode 100644 index 000000000000..48b462f75023 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c @@ -0,0 +1,160 @@ +/* + * Ptrace test for TAR, PPR, DSCR registers in the TM context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include "ptrace.h" +#include "tm.h" +#include "ptrace-tar.h" + +int shm_id; +unsigned long *cptr, *pptr; + + +void tm_tar(void) +{ + unsigned long result, texasr; + unsigned long regs[3]; + int ret; + + cptr = (unsigned long *)shmat(shm_id, NULL, 0); + +trans: + cptr[1] = 0; + asm __volatile__( + "li 4, %[tar_1];" + "mtspr %[sprn_tar], 4;" /* TAR_1 */ + "li 4, %[dscr_1];" + "mtspr %[sprn_dscr], 4;" /* DSCR_1 */ + "or 31,31,31;" /* PPR_1*/ + + "1: ;" + "tbegin.;" + "beq 2f;" + + "li 4, %[tar_2];" + "mtspr %[sprn_tar], 4;" /* TAR_2 */ + "li 4, %[dscr_2];" + "mtspr %[sprn_dscr], 4;" /* DSCR_2 */ + "or 1,1,1;" /* PPR_2 */ + "tsuspend.;" + "li 0, 1;" + "stw 0, 0(%[cptr1]);" + "tresume.;" + "b .;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + /* Transaction abort handler */ + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + + : [res] "=r" (result), [texasr] "=r" (texasr) + : [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR), + [sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR), + [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), + [dscr_2]"i"(DSCR_2), [cptr1] "r" (&cptr[1]) + : "memory", "r0", "r1", "r3", "r4", "r5", "r6" + ); + + /* TM failed, analyse */ + if (result) { + if (!cptr[0]) + goto trans; + + regs[0] = mfspr(SPRN_TAR); + regs[1] = mfspr(SPRN_PPR); + regs[2] = mfspr(SPRN_DSCR); + + shmdt(&cptr); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + user_read, regs[0], regs[1], regs[2]); + + ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4); + if (ret) + exit(1); + exit(0); + } + shmdt(&cptr); + exit(1); +} + +int trace_tm_tar(pid_t child) +{ + unsigned long regs[3]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_tar_registers(child, regs)); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + ptrace_read_running, regs[0], regs[1], regs[2]); + + FAIL_IF(validate_tar_registers(regs, TAR_2, PPR_2, DSCR_2)); + FAIL_IF(show_tm_checkpointed_state(child, regs)); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + ptrace_read_ckpt, regs[0], regs[1], regs[2]); + + FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1)); + FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4)); + printf("%-30s TAR: %u PPR: %lx DSCR: %u\n", + ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4); + + pptr[0] = 1; + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int ptrace_tm_tar(void) +{ + pid_t pid; + int ret, status; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); + pid = fork(); + if (pid == 0) + tm_tar(); + + pptr = (unsigned long *)shmat(shm_id, NULL, 0); + pptr[0] = 0; + + if (pid) { + while (!pptr[1]) + asm volatile("" : : : "memory"); + ret = trace_tm_tar(pid); + if (ret) { + kill(pid, SIGTERM); + shmdt(&pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + shmdt(&pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? 
TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_tar, "ptrace_tm_tar"); +} -- cgit From 01f7fdc7b90e5afc3fafa19da970ec30caa44f3d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:58 +0800 Subject: selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR in suspended TM This patch adds ptrace interface test for TAR, PPR, DSCR registers inside suspended TM context. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../selftests/powerpc/ptrace/ptrace-tm-spd-tar.c | 174 +++++++++++++++++++++ 3 files changed, 176 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 556a6e1a1773..7b3383a2fc30 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -3,3 +3,4 @@ ptrace-tm-gpr ptrace-tm-spd-gpr ptrace-tar ptrace-tm-tar +ptrace-tm-spd-tar diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index befa2c5bb40b..66accb29ea19 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,5 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ - ptrace-tar ptrace-tm-tar + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c new file mode 100644 index 000000000000..b3c061dc9512 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c @@ -0,0 +1,174 @@ +/* + * Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "tm.h" +#include "ptrace-tar.h" + +int shm_id; +int *cptr, *pptr; + +__attribute__((used)) void wait_parent(void) +{ + cptr[2] = 1; + while (!cptr[1]) + asm volatile("" : : : "memory"); +} + +void tm_spd_tar(void) +{ + unsigned long result, texasr; + unsigned long regs[3]; + int ret; + + cptr = (int *)shmat(shm_id, NULL, 0); + +trans: + cptr[2] = 0; + asm __volatile__( + "li 4, %[tar_1];" + "mtspr %[sprn_tar], 4;" /* TAR_1 */ + "li 4, %[dscr_1];" + "mtspr %[sprn_dscr], 4;" /* DSCR_1 */ + "or 31,31,31;" /* PPR_1*/ + + "1: ;" + "tbegin.;" + "beq 2f;" + + "li 4, %[tar_2];" + "mtspr %[sprn_tar], 4;" /* TAR_2 */ + "li 4, %[dscr_2];" + "mtspr %[sprn_dscr], 4;" /* DSCR_2 */ + "or 1,1,1;" /* PPR_2 */ + + "tsuspend.;" + "li 4, %[tar_3];" + "mtspr %[sprn_tar], 4;" /* TAR_3 */ + "li 4, %[dscr_3];" + "mtspr %[sprn_dscr], 4;" /* DSCR_3 */ + "or 6,6,6;" /* PPR_3 */ + "bl wait_parent;" + "tresume.;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + /* Transaction abort handler */ + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + + : [res] "=r" (result), [texasr] "=r" (texasr) + : [val] "r" (cptr[1]), [sprn_dscr]"i"(SPRN_DSCR), + [sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR), + [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1), + [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2), + [tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3) + : "memory", "r0", "r1", "r3", "r4", "r5", "r6" + ); + + /* TM failed, analyse */ + if (result) { + if (!cptr[0]) + goto trans; + + regs[0] = mfspr(SPRN_TAR); + regs[1] = mfspr(SPRN_PPR); + regs[2] = mfspr(SPRN_DSCR); + + shmdt(&cptr); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + user_read, regs[0], regs[1], regs[2]); + + ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4); + if (ret) + exit(1); + exit(0); + } + shmdt(&cptr); + exit(1); +} + +int trace_tm_spd_tar(pid_t child) +{ + unsigned long regs[3]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_tar_registers(child, regs)); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + ptrace_read_running, regs[0], regs[1], regs[2]); + + FAIL_IF(validate_tar_registers(regs, TAR_3, PPR_3, DSCR_3)); + FAIL_IF(show_tm_checkpointed_state(child, regs)); + printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n", + ptrace_read_ckpt, regs[0], regs[1], regs[2]); + + FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1)); + FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4)); + printf("%-30s TAR: %u PPR: %lx DSCR: %u\n", + ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4); + + pptr[0] = 1; + pptr[1] = 1; + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int ptrace_tm_spd_tar(void) +{ + pid_t pid; + int ret, status; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT); + pid = fork(); + if (pid == 0) + tm_spd_tar(); + + pptr = (int *)shmat(shm_id, NULL, 0); + pptr[0] = 0; + pptr[1] = 0; + + if (pid) { + while (!pptr[2]) + asm volatile("" : : : "memory"); + ret = trace_tm_spd_tar(pid); + if (ret) { + kill(pid, SIGTERM); + shmdt(&pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + shmdt(&pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? 
TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_spd_tar, "ptrace_tm_spd_tar"); +} -- cgit From 0da535c0844b81111f837670e0b9b300cc75b100 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:32:59 +0800 Subject: selftests/powerpc: Add ptrace tests for VSX, VMX registers This patch adds ptrace interface test for VSX, VMX registers. This also adds ptrace interface based helper functions related to VSX, VMX registers access. This also adds some assembly helper functions related to VSX and VMX registers. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/lib/reg.S | 265 +++++++++++++++++++++ tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../testing/selftests/powerpc/ptrace/ptrace-vsx.c | 117 +++++++++ .../testing/selftests/powerpc/ptrace/ptrace-vsx.h | 127 ++++++++++ tools/testing/selftests/powerpc/ptrace/ptrace.h | 119 +++++++++ 6 files changed, 630 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S index b6aee2f79454..0dc44f0da065 100644 --- a/tools/testing/selftests/powerpc/lib/reg.S +++ b/tools/testing/selftests/powerpc/lib/reg.S @@ -130,3 +130,268 @@ FUNC_START(store_fpr_single_precision) stfs 31, 31*4(3) blr FUNC_END(store_fpr_single_precision) + +/* VMX/VSX registers - unsigned long buf[128] */ +FUNC_START(loadvsx) + lis 4, 0 + LXVD2X (0,(4),(3)) + addi 4, 4, 16 + LXVD2X (1,(4),(3)) + addi 4, 4, 16 + LXVD2X (2,(4),(3)) + addi 4, 4, 16 + LXVD2X (3,(4),(3)) + addi 4, 4, 16 + LXVD2X (4,(4),(3)) + addi 4, 4, 16 + LXVD2X (5,(4),(3)) + addi 4, 4, 16 + LXVD2X (6,(4),(3)) + addi 4, 4, 16 + LXVD2X (7,(4),(3)) + addi 4, 4, 16 + LXVD2X (8,(4),(3)) + addi 4, 4, 16 + LXVD2X (9,(4),(3)) + addi 4, 4, 16 + LXVD2X (10,(4),(3)) + addi 4, 4, 16 + LXVD2X (11,(4),(3)) + addi 4, 4, 16 + LXVD2X (12,(4),(3)) + addi 4, 4, 16 + LXVD2X (13,(4),(3)) + addi 4, 4, 16 + LXVD2X (14,(4),(3)) + addi 4, 4, 16 + LXVD2X (15,(4),(3)) + addi 4, 4, 16 + LXVD2X (16,(4),(3)) + addi 4, 4, 16 + LXVD2X (17,(4),(3)) + addi 4, 4, 16 + LXVD2X (18,(4),(3)) + addi 4, 4, 16 + LXVD2X (19,(4),(3)) + addi 4, 4, 16 + LXVD2X (20,(4),(3)) + addi 4, 4, 16 + LXVD2X (21,(4),(3)) + addi 4, 4, 16 + LXVD2X (22,(4),(3)) + addi 4, 4, 16 + LXVD2X (23,(4),(3)) + addi 4, 4, 16 + LXVD2X (24,(4),(3)) + addi 4, 4, 16 + LXVD2X (25,(4),(3)) + addi 4, 4, 16 + LXVD2X (26,(4),(3)) + addi 4, 4, 16 + LXVD2X (27,(4),(3)) + addi 4, 4, 16 + LXVD2X (28,(4),(3)) + addi 4, 4, 16 + LXVD2X (29,(4),(3)) + addi 4, 4, 16 + LXVD2X (30,(4),(3)) + addi 4, 4, 16 + LXVD2X (31,(4),(3)) + addi 4, 4, 16 + LXVD2X (32,(4),(3)) + addi 4, 4, 16 + LXVD2X (33,(4),(3)) + addi 4, 4, 16 + LXVD2X (34,(4),(3)) + addi 4, 4, 16 + LXVD2X (35,(4),(3)) + addi 4, 4, 16 + LXVD2X (36,(4),(3)) + addi 4, 4, 16 + LXVD2X (37,(4),(3)) + addi 4, 4, 16 + LXVD2X (38,(4),(3)) + addi 4, 4, 16 + LXVD2X (39,(4),(3)) + addi 4, 4, 16 + LXVD2X (40,(4),(3)) + addi 4, 4, 16 + LXVD2X (41,(4),(3)) + addi 4, 4, 16 + LXVD2X (42,(4),(3)) + addi 4, 4, 16 + LXVD2X (43,(4),(3)) + addi 4, 4, 16 + LXVD2X (44,(4),(3)) + addi 4, 4, 16 + LXVD2X (45,(4),(3)) + addi 4, 4, 16 + LXVD2X (46,(4),(3)) + addi 4, 4, 16 + LXVD2X (47,(4),(3)) + addi 4, 4, 16 + LXVD2X (48,(4),(3)) + addi 4, 4, 16 
+ LXVD2X (49,(4),(3)) + addi 4, 4, 16 + LXVD2X (50,(4),(3)) + addi 4, 4, 16 + LXVD2X (51,(4),(3)) + addi 4, 4, 16 + LXVD2X (52,(4),(3)) + addi 4, 4, 16 + LXVD2X (53,(4),(3)) + addi 4, 4, 16 + LXVD2X (54,(4),(3)) + addi 4, 4, 16 + LXVD2X (55,(4),(3)) + addi 4, 4, 16 + LXVD2X (56,(4),(3)) + addi 4, 4, 16 + LXVD2X (57,(4),(3)) + addi 4, 4, 16 + LXVD2X (58,(4),(3)) + addi 4, 4, 16 + LXVD2X (59,(4),(3)) + addi 4, 4, 16 + LXVD2X (60,(4),(3)) + addi 4, 4, 16 + LXVD2X (61,(4),(3)) + addi 4, 4, 16 + LXVD2X (62,(4),(3)) + addi 4, 4, 16 + LXVD2X (63,(4),(3)) + blr +FUNC_END(loadvsx) + +FUNC_START(storevsx) + lis 4, 0 + STXVD2X (0,(4),(3)) + addi 4, 4, 16 + STXVD2X (1,(4),(3)) + addi 4, 4, 16 + STXVD2X (2,(4),(3)) + addi 4, 4, 16 + STXVD2X (3,(4),(3)) + addi 4, 4, 16 + STXVD2X (4,(4),(3)) + addi 4, 4, 16 + STXVD2X (5,(4),(3)) + addi 4, 4, 16 + STXVD2X (6,(4),(3)) + addi 4, 4, 16 + STXVD2X (7,(4),(3)) + addi 4, 4, 16 + STXVD2X (8,(4),(3)) + addi 4, 4, 16 + STXVD2X (9,(4),(3)) + addi 4, 4, 16 + STXVD2X (10,(4),(3)) + addi 4, 4, 16 + STXVD2X (11,(4),(3)) + addi 4, 4, 16 + STXVD2X (12,(4),(3)) + addi 4, 4, 16 + STXVD2X (13,(4),(3)) + addi 4, 4, 16 + STXVD2X (14,(4),(3)) + addi 4, 4, 16 + STXVD2X (15,(4),(3)) + addi 4, 4, 16 + STXVD2X (16,(4),(3)) + addi 4, 4, 16 + STXVD2X (17,(4),(3)) + addi 4, 4, 16 + STXVD2X (18,(4),(3)) + addi 4, 4, 16 + STXVD2X (19,(4),(3)) + addi 4, 4, 16 + STXVD2X (20,(4),(3)) + addi 4, 4, 16 + STXVD2X (21,(4),(3)) + addi 4, 4, 16 + STXVD2X (22,(4),(3)) + addi 4, 4, 16 + STXVD2X (23,(4),(3)) + addi 4, 4, 16 + STXVD2X (24,(4),(3)) + addi 4, 4, 16 + STXVD2X (25,(4),(3)) + addi 4, 4, 16 + STXVD2X (26,(4),(3)) + addi 4, 4, 16 + STXVD2X (27,(4),(3)) + addi 4, 4, 16 + STXVD2X (28,(4),(3)) + addi 4, 4, 16 + STXVD2X (29,(4),(3)) + addi 4, 4, 16 + STXVD2X (30,(4),(3)) + addi 4, 4, 16 + STXVD2X (31,(4),(3)) + addi 4, 4, 16 + STXVD2X (32,(4),(3)) + addi 4, 4, 16 + STXVD2X (33,(4),(3)) + addi 4, 4, 16 + STXVD2X (34,(4),(3)) + addi 4, 4, 16 + STXVD2X (35,(4),(3)) + addi 4, 4, 16 + STXVD2X (36,(4),(3)) + addi 4, 4, 16 + STXVD2X (37,(4),(3)) + addi 4, 4, 16 + STXVD2X (38,(4),(3)) + addi 4, 4, 16 + STXVD2X (39,(4),(3)) + addi 4, 4, 16 + STXVD2X (40,(4),(3)) + addi 4, 4, 16 + STXVD2X (41,(4),(3)) + addi 4, 4, 16 + STXVD2X (42,(4),(3)) + addi 4, 4, 16 + STXVD2X (43,(4),(3)) + addi 4, 4, 16 + STXVD2X (44,(4),(3)) + addi 4, 4, 16 + STXVD2X (45,(4),(3)) + addi 4, 4, 16 + STXVD2X (46,(4),(3)) + addi 4, 4, 16 + STXVD2X (47,(4),(3)) + addi 4, 4, 16 + STXVD2X (48,(4),(3)) + addi 4, 4, 16 + STXVD2X (49,(4),(3)) + addi 4, 4, 16 + STXVD2X (50,(4),(3)) + addi 4, 4, 16 + STXVD2X (51,(4),(3)) + addi 4, 4, 16 + STXVD2X (52,(4),(3)) + addi 4, 4, 16 + STXVD2X (53,(4),(3)) + addi 4, 4, 16 + STXVD2X (54,(4),(3)) + addi 4, 4, 16 + STXVD2X (55,(4),(3)) + addi 4, 4, 16 + STXVD2X (56,(4),(3)) + addi 4, 4, 16 + STXVD2X (57,(4),(3)) + addi 4, 4, 16 + STXVD2X (58,(4),(3)) + addi 4, 4, 16 + STXVD2X (59,(4),(3)) + addi 4, 4, 16 + STXVD2X (60,(4),(3)) + addi 4, 4, 16 + STXVD2X (61,(4),(3)) + addi 4, 4, 16 + STXVD2X (62,(4),(3)) + addi 4, 4, 16 + STXVD2X (63,(4),(3)) + blr +FUNC_END(storevsx) diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 7b3383a2fc30..720c002859d8 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -4,3 +4,4 @@ ptrace-tm-spd-gpr ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar +ptrace-vsx diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile 
b/tools/testing/selftests/powerpc/ptrace/Makefile index 66accb29ea19..18790ff44588 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,5 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ - ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c new file mode 100644 index 000000000000..04084ee7d27b --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c @@ -0,0 +1,117 @@ +/* + * Ptrace test for VMX/VSX registers + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include "ptrace.h" +#include "ptrace-vsx.h" + +/* Tracer and Tracee Shared Data */ +int shm_id; +int *cptr, *pptr; + +unsigned long fp_load[VEC_MAX]; +unsigned long fp_load_new[VEC_MAX]; +unsigned long fp_store[VEC_MAX]; + +void vsx(void) +{ + int ret; + + cptr = (int *)shmat(shm_id, NULL, 0); + loadvsx(fp_load, 0); + cptr[1] = 1; + + while (!cptr[0]) + asm volatile("" : : : "memory"); + shmdt((void *) cptr); + + storevsx(fp_store, 0); + ret = compare_vsx_vmx(fp_store, fp_load_new); + if (ret) + exit(1); + exit(0); +} + +int trace_vsx(pid_t child) +{ + unsigned long vsx[VSX_MAX]; + unsigned long vmx[VMX_MAX + 2][2]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_vsx(child, vsx)); + FAIL_IF(validate_vsx(vsx, fp_load)); + FAIL_IF(show_vmx(child, vmx)); + FAIL_IF(validate_vmx(vmx, fp_load)); + + memset(vsx, 0, sizeof(vsx)); + memset(vmx, 0, sizeof(vmx)); + load_vsx_vmx(fp_load_new, vsx, vmx); + + FAIL_IF(write_vsx(child, vsx)); + FAIL_IF(write_vmx(child, vmx)); + FAIL_IF(stop_trace(child)); + + return TEST_PASS; +} + +int ptrace_vsx(void) +{ + pid_t pid; + int ret, status, i; + + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); + + for (i = 0; i < VEC_MAX; i++) + fp_load[i] = i + rand(); + + for (i = 0; i < VEC_MAX; i++) + fp_load_new[i] = i + 2 * rand(); + + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + vsx(); + + if (pid) { + pptr = (int *)shmat(shm_id, NULL, 0); + while (!pptr[1]) + asm volatile("" : : : "memory"); + + ret = trace_vsx(pid); + if (ret) { + kill(pid, SIGTERM); + shmdt((void *)pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + pptr[0] = 1; + shmdt((void *)pptr); + + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_vsx, "ptrace_vsx"); +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h new file mode 100644 index 000000000000..f4e4b427c9d9 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#define VEC_MAX 128 +#define VSX_MAX 32 +#define VMX_MAX 32 + +/* + * unsigned long vsx[32] + * unsigned long load[128] + */ +int validate_vsx(unsigned long *vsx, unsigned long *load) +{ + int i; + + for (i = 0; i < VSX_MAX; i++) { + if (vsx[i] != load[2 * i + 1]) { + printf("vsx[%d]: %lx load[%d] %lx\n", + i, vsx[i], 2 * i + 1, load[2 * i + 1]); + return TEST_FAIL; + } + } + return TEST_PASS; +} + +/* + * unsigned long vmx[32][2] + * unsigned long load[128] + */ +int validate_vmx(unsigned long vmx[][2], unsigned long *load) +{ + int i; + + for (i = 0; i < VMX_MAX; i++) { + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + if ((vmx[i][0] != load[64 + 2 * i]) || + (vmx[i][1] != load[65 + 2 * i])) { + printf("vmx[%d][0]: %lx load[%d] %lx\n", + i, vmx[i][0], 64 + 2 * i, + load[64 + 2 * i]); + printf("vmx[%d][1]: %lx load[%d] %lx\n", + i, vmx[i][1], 65 + 2 * i, + load[65 + 2 * i]); + return TEST_FAIL; + } + #else /* + * In LE each value pair is stored in an + * alternate manner. + */ + if ((vmx[i][0] != load[65 + 2 * i]) || + (vmx[i][1] != load[64 + 2 * i])) { + printf("vmx[%d][0]: %lx load[%d] %lx\n", + i, vmx[i][0], 65 + 2 * i, + load[65 + 2 * i]); + printf("vmx[%d][1]: %lx load[%d] %lx\n", + i, vmx[i][1], 64 + 2 * i, + load[64 + 2 * i]); + return TEST_FAIL; + } + #endif + } + return TEST_PASS; +} + +/* + * unsigned long store[128] + * unsigned long load[128] + */ +int compare_vsx_vmx(unsigned long *store, unsigned long *load) +{ + int i; + + for (i = 0; i < VSX_MAX; i++) { + if (store[1 + 2 * i] != load[1 + 2 * i]) { + printf("store[%d]: %lx load[%d] %lx\n", + 1 + 2 * i, store[i], + 1 + 2 * i, load[i]); + return TEST_FAIL; + } + } + + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + for (i = 64; i < VEC_MAX; i++) { + if (store[i] != load[i]) { + printf("store[%d]: %lx load[%d] %lx\n", + i, store[i], i, load[i]); + return TEST_FAIL; + } + } + #else /* In LE each value pair is stored in an alternate manner */ + for (i = 64; i < VEC_MAX; i++) { + if (!(i % 2) && (store[i] != load[i+1])) { + printf("store[%d]: %lx load[%d] %lx\n", + i, store[i], i+1, load[i+1]); + return TEST_FAIL; + } + if ((i % 2) && (store[i] != load[i-1])) { + printf("here store[%d]: %lx load[%d] %lx\n", + i, store[i], i-1, load[i-1]); + return TEST_FAIL; + } + } + #endif + return TEST_PASS; +} + +void load_vsx_vmx(unsigned long *load, unsigned long *vsx, + unsigned long vmx[][2]) +{ + int i; + + for (i = 0; i < VSX_MAX; i++) + vsx[i] = load[1 + 2 * i]; + + for (i = 0; i < VMX_MAX; i++) { + vmx[i][0] = load[64 + 2 * i]; + vmx[i][1] = load[65 + 2 * i]; + } +} + +void loadvsx(void *p, int tmp); +void storevsx(void *p, int tmp); diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h index fee71b8c1e75..9aa0498ac738 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace.h +++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h @@ -486,6 +486,125 @@ int write_ckpt_gpr(pid_t child, unsigned long val) return TEST_PASS; } +/* VMX */ +int show_vmx(pid_t child, unsigned long vmx[][2]) +{ + int ret; + + ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx); + if (ret) { + perror("ptrace(PTRACE_GETVRREGS) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int show_vmx_ckpt(pid_t child, unsigned long vmx[][2]) +{ + unsigned 
long regs[34][2]; + struct iovec iov; + int ret; + + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(regs); + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVMX, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVMX) failed"); + return TEST_FAIL; + } + memcpy(vmx, regs, sizeof(regs)); + return TEST_PASS; +} + + +int write_vmx(pid_t child, unsigned long vmx[][2]) +{ + int ret; + + ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx); + if (ret) { + perror("ptrace(PTRACE_SETVRREGS) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int write_vmx_ckpt(pid_t child, unsigned long vmx[][2]) +{ + unsigned long regs[34][2]; + struct iovec iov; + int ret; + + memcpy(regs, vmx, sizeof(regs)); + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(regs); + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVMX, &iov); + if (ret) { + perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVMX) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +/* VSX */ +int show_vsx(pid_t child, unsigned long *vsx) +{ + int ret; + + ret = ptrace(PTRACE_GETVSRREGS, child, 0, vsx); + if (ret) { + perror("ptrace(PTRACE_GETVSRREGS) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int show_vsx_ckpt(pid_t child, unsigned long *vsx) +{ + unsigned long regs[32]; + struct iovec iov; + int ret; + + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(regs); + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVSX, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVSX) failed"); + return TEST_FAIL; + } + memcpy(vsx, regs, sizeof(regs)); + return TEST_PASS; +} + +int write_vsx(pid_t child, unsigned long *vsx) +{ + int ret; + + ret = ptrace(PTRACE_SETVSRREGS, child, 0, vsx); + if (ret) { + perror("ptrace(PTRACE_SETVSRREGS) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + +int write_vsx_ckpt(pid_t child, unsigned long *vsx) +{ + unsigned long regs[32]; + struct iovec iov; + int ret; + + memcpy(regs, vsx, sizeof(regs)); + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(regs); + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVSX, &iov); + if (ret) { + perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVSX) failed"); + return TEST_FAIL; + } + return TEST_PASS; +} + /* Analyse TEXASR after TM failure */ inline unsigned long get_tfiar(void) { -- cgit From 11508074c9a74a00df1aa6cfba81d1fdabdaa51c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:33:00 +0800 Subject: selftests/powerpc: Add ptrace tests for VSX, VMX registers in TM This patch adds ptrace interface test for VSX, VMX registers inside TM context. This also adds ptrace interface based helper functions related to chckpointed VSX, VMX registers access. 
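As a minimal illustration of the checkpointed-state access these helpers wrap (a sketch, not part of the patch: NT_PPC_TM_CVSX and the 32-doubleword buffer size follow show_vsx_ckpt() in ptrace.h above, and the child pid, the sys/ptrace.h and sys/uio.h includes, and error handling are left to the caller):

	/* Sketch: read the checkpointed VSX state of a stopped tracee. */
	unsigned long ckpt_vsx[32];
	struct iovec iov = {
		.iov_base = ckpt_vsx,
		.iov_len = sizeof(ckpt_vsx),
	};

	if (ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVSX, &iov))
		perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVSX) failed");

The checkpointed regsets only return data while the tracee is stopped inside an active transaction, which is why these tests suspend the transaction and spin until the tracer is done.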
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../selftests/powerpc/ptrace/ptrace-tm-vsx.c | 168 +++++++++++++++++++++ 3 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 720c002859d8..f1f3055ca647 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -5,3 +5,4 @@ ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx +ptrace-tm-vsx diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 18790ff44588..f82cfe12aaeb 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,5 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ - ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c new file mode 100644 index 000000000000..b4081e2b22d5 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c @@ -0,0 +1,168 @@ +/* + * Ptrace test for VMX/VSX registers in the TM context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "tm.h" +#include "ptrace-vsx.h" + +int shm_id; +unsigned long *cptr, *pptr; + +unsigned long fp_load[VEC_MAX]; +unsigned long fp_store[VEC_MAX]; +unsigned long fp_load_ckpt[VEC_MAX]; +unsigned long fp_load_ckpt_new[VEC_MAX]; + +__attribute__((used)) void load_vsx(void) +{ + loadvsx(fp_load, 0); +} + +__attribute__((used)) void load_vsx_ckpt(void) +{ + loadvsx(fp_load_ckpt, 0); +} + +void tm_vsx(void) +{ + unsigned long result, texasr; + int ret; + + cptr = (unsigned long *)shmat(shm_id, NULL, 0); + +trans: + cptr[1] = 0; + asm __volatile__( + "bl load_vsx_ckpt;" + + "1: ;" + "tbegin.;" + "beq 2f;" + + "bl load_vsx;" + "tsuspend.;" + "li 7, 1;" + "stw 7, 0(%[cptr1]);" + "tresume.;" + "b .;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + : [res] "=r" (result), [texasr] "=r" (texasr) + : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt), + [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "r" (&cptr[1]) + : "memory", "r0", "r1", "r2", "r3", "r4", + "r7", "r8", "r9", "r10", "r11" + ); + + if (result) { + if (!cptr[0]) + goto trans; + + shmdt((void *)cptr); + storevsx(fp_store, 0); + ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new); + if (ret) + exit(1); + exit(0); + } + shmdt((void *)cptr); + exit(1); +} + +int trace_tm_vsx(pid_t child) +{ + unsigned long vsx[VSX_MAX]; + unsigned long vmx[VMX_MAX + 2][2]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_vsx(child, vsx)); + FAIL_IF(validate_vsx(vsx, fp_load)); + FAIL_IF(show_vmx(child, vmx)); + FAIL_IF(validate_vmx(vmx, fp_load)); + FAIL_IF(show_vsx_ckpt(child, vsx)); + FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); + FAIL_IF(show_vmx_ckpt(child, vmx)); + FAIL_IF(validate_vmx(vmx, fp_load_ckpt)); + memset(vsx, 0, sizeof(vsx)); + memset(vmx, 0, sizeof(vmx)); + + load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); + + FAIL_IF(write_vsx_ckpt(child, vsx)); + FAIL_IF(write_vmx_ckpt(child, vmx)); + pptr[0] = 1; + FAIL_IF(stop_trace(child)); + return TEST_PASS; +} + +int ptrace_tm_vsx(void) +{ + pid_t pid; + int ret, status, i; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); + + for (i = 0; i < 128; i++) { + fp_load[i] = 1 + rand(); + fp_load_ckpt[i] = 1 + 2 * rand(); + fp_load_ckpt_new[i] = 1 + 3 * rand(); + } + + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + tm_vsx(); + + if (pid) { + pptr = (unsigned long *)shmat(shm_id, NULL, 0); + while (!pptr[1]) + asm volatile("" : : : "memory"); + + ret = trace_tm_vsx(pid); + if (ret) { + kill(pid, SIGKILL); + shmdt((void *)pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + shmdt((void *)pptr); + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_vsx, "ptrace_tm_vsx"); +} -- cgit From a18b55bf51b32136fb9c5190b1cbf86c75117ead Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:33:01 +0800 Subject: selftests/powerpc: Add ptrace tests for VSX, VMX registers in suspended TM This patch adds ptrace interface test for VSX, VMX registers inside suspended TM context. 
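The tracer-side write path is the mirror image; a hedged sketch of replacing the checkpointed VMX state (the 34-entry layout matches write_vmx_ckpt() earlier in this series, 32 vector registers plus two extra quadwords, presumably VSCR and VRSAVE; the values are placeholders):

	/* Sketch: overwrite the checkpointed VMX state of a stopped tracee. */
	unsigned long ckpt_vmx[34][2] = { { 0x1, 0x2 } /* , ... */ };
	struct iovec iov = {
		.iov_base = ckpt_vmx,
		.iov_len = sizeof(ckpt_vmx),
	};

	if (ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVMX, &iov))
		perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVMX) failed");

After a forced abort the transaction rolls back to these values, which is what the child's compare_vsx_vmx() call checks against fp_load_ckpt_new.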
Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 3 +- .../selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c | 185 +++++++++++++++++++++ 3 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index f1f3055ca647..49439538e234 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -6,3 +6,4 @@ ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx +ptrace-tm-spd-vsx diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index f82cfe12aaeb..f3f4129b3755 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,6 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ - ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \ + ptrace-tm-spd-vsx include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c new file mode 100644 index 000000000000..0df3c23b7888 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c @@ -0,0 +1,185 @@ +/* + * Ptrace test for VMX/VSX registers in the TM Suspend context + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "tm.h" +#include "ptrace-vsx.h" + +int shm_id; +int *cptr, *pptr; + +unsigned long fp_load[VEC_MAX]; +unsigned long fp_load_new[VEC_MAX]; +unsigned long fp_store[VEC_MAX]; +unsigned long fp_load_ckpt[VEC_MAX]; +unsigned long fp_load_ckpt_new[VEC_MAX]; + +__attribute__((used)) void load_vsx(void) +{ + loadvsx(fp_load, 0); +} + +__attribute__((used)) void load_vsx_new(void) +{ + loadvsx(fp_load_new, 0); +} + +__attribute__((used)) void load_vsx_ckpt(void) +{ + loadvsx(fp_load_ckpt, 0); +} + +__attribute__((used)) void wait_parent(void) +{ + cptr[2] = 1; + while (!cptr[1]) + asm volatile("" : : : "memory"); +} + +void tm_spd_vsx(void) +{ + unsigned long result, texasr; + int ret; + + cptr = (int *)shmat(shm_id, NULL, 0); + +trans: + cptr[2] = 0; + asm __volatile__( + "bl load_vsx_ckpt;" + + "1: ;" + "tbegin.;" + "beq 2f;" + + "bl load_vsx_new;" + "tsuspend.;" + "bl load_vsx;" + "bl wait_parent;" + "tresume.;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + "2: ;" + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + : [res] "=r" (result), [texasr] "=r" (texasr) + : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt), + [sprn_texasr] "i" (SPRN_TEXASR) + : "memory", "r0", "r1", "r2", "r3", "r4", + "r8", "r9", "r10", "r11" + ); + + if (result) { + if (!cptr[0]) + goto trans; + shmdt((void *)cptr); + + storevsx(fp_store, 0); + ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new); + if (ret) + exit(1); + exit(0); + } + shmdt((void *)cptr); + exit(1); +} + +int trace_tm_spd_vsx(pid_t child) +{ + unsigned long vsx[VSX_MAX]; + unsigned long vmx[VMX_MAX + 2][2]; + + FAIL_IF(start_trace(child)); + FAIL_IF(show_vsx(child, vsx)); + FAIL_IF(validate_vsx(vsx, fp_load)); + FAIL_IF(show_vmx(child, vmx)); + FAIL_IF(validate_vmx(vmx, fp_load)); + FAIL_IF(show_vsx_ckpt(child, vsx)); + FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); + FAIL_IF(show_vmx_ckpt(child, vmx)); + FAIL_IF(validate_vmx(vmx, fp_load_ckpt)); + + memset(vsx, 0, sizeof(vsx)); + memset(vmx, 0, sizeof(vmx)); + + load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); + + FAIL_IF(write_vsx_ckpt(child, vsx)); + FAIL_IF(write_vmx_ckpt(child, vmx)); + + pptr[0] = 1; + pptr[1] = 1; + FAIL_IF(stop_trace(child)); + + return TEST_PASS; +} + +int ptrace_tm_spd_vsx(void) +{ + pid_t pid; + int ret, status, i; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT); + + for (i = 0; i < 128; i++) { + fp_load[i] = 1 + rand(); + fp_load_new[i] = 1 + 2 * rand(); + fp_load_ckpt[i] = 1 + 3 * rand(); + fp_load_ckpt_new[i] = 1 + 4 * rand(); + } + + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + tm_spd_vsx(); + + if (pid) { + pptr = (int *)shmat(shm_id, NULL, 0); + while (!pptr[2]) + asm volatile("" : : : "memory"); + + ret = trace_tm_spd_vsx(pid); + if (ret) { + kill(pid, SIGKILL); + shmdt((void *)pptr); + shmctl(shm_id, IPC_RMID, NULL); + return TEST_FAIL; + } + + shmdt((void *)pptr); + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? 
TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_spd_vsx, "ptrace_tm_spd_vsx"); +} -- cgit From 5bdac52f3c67e1262b1979081a362fdedebd2ed2 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 30 Sep 2016 10:33:02 +0800 Subject: selftests/powerpc: Add ptrace tests for TM SPR registers This patch adds ptrace interface test for TM SPR registers. This also adds ptrace interface based helper functions related to TM SPR registers access. Signed-off-by: Anshuman Khandual Signed-off-by: Simon Guo Signed-off-by: Michael Ellerman --- tools/testing/selftests/powerpc/ptrace/.gitignore | 1 + tools/testing/selftests/powerpc/ptrace/Makefile | 2 +- .../selftests/powerpc/ptrace/ptrace-tm-spr.c | 168 +++++++++++++++++++++ tools/testing/selftests/powerpc/ptrace/ptrace.h | 35 +++++ 4 files changed, 205 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore index 49439538e234..349acfafc95b 100644 --- a/tools/testing/selftests/powerpc/ptrace/.gitignore +++ b/tools/testing/selftests/powerpc/ptrace/.gitignore @@ -7,3 +7,4 @@ ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx ptrace-tm-spd-vsx +ptrace-tm-spr diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index f3f4129b3755..fe6bc60dfc60 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,6 +1,6 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \ - ptrace-tm-spd-vsx + ptrace-tm-spd-vsx ptrace-tm-spr include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c new file mode 100644 index 000000000000..94e57cb89769 --- /dev/null +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c @@ -0,0 +1,168 @@ +/* + * Ptrace test TM SPR registers + * + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include "ptrace.h" +#include "tm.h" + +/* Tracee and tracer shared data */ +struct shared { + int flag; + struct tm_spr_regs regs; +}; +unsigned long tfhar; + +int shm_id; +struct shared *cptr, *pptr; + +int shm_id1; +int *cptr1, *pptr1; + +#define TM_KVM_SCHED 0xe0000001ac000001 +int validate_tm_spr(struct tm_spr_regs *regs) +{ + FAIL_IF(regs->tm_tfhar != tfhar); + FAIL_IF((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0)); + + return TEST_PASS; +} + +void tm_spr(void) +{ + unsigned long result, texasr; + int ret; + + cptr = (struct shared *)shmat(shm_id, NULL, 0); + cptr1 = (int *)shmat(shm_id1, NULL, 0); + +trans: + cptr1[0] = 0; + asm __volatile__( + "1: ;" + /* TM failover handler should follow "tbegin.;" */ + "mflr 31;" + "bl 4f;" /* $ = TFHAR - 12 */ + "4: ;" + "mflr %[tfhar];" + "mtlr 31;" + + "tbegin.;" + "beq 2f;" + + "tsuspend.;" + "li 8, 1;" + "sth 8, 0(%[cptr1]);" + "tresume.;" + "b .;" + + "tend.;" + "li 0, 0;" + "ori %[res], 0, 0;" + "b 3f;" + + "2: ;" + + "li 0, 1;" + "ori %[res], 0, 0;" + "mfspr %[texasr], %[sprn_texasr];" + + "3: ;" + : [tfhar] "=r" (tfhar), [res] "=r" (result), + [texasr] "=r" (texasr), [cptr1] "=r" (cptr1) + : [sprn_texasr] "i" (SPRN_TEXASR) + : "memory", "r0", "r1", "r2", "r3", "r4", + "r8", "r9", "r10", "r11", "r31" + ); + + /* There are 2 32bit instructions before tbegin. */ + tfhar += 12; + + if (result) { + if (!cptr->flag) + goto trans; + + ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs); + shmdt((void *)cptr); + shmdt((void *)cptr1); + if (ret) + exit(1); + exit(0); + } + shmdt((void *)cptr); + shmdt((void *)cptr1); + exit(1); +} + +int trace_tm_spr(pid_t child) +{ + FAIL_IF(start_trace(child)); + FAIL_IF(show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs)); + + printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_tfhar, + pptr->regs.tm_texasr, pptr->regs.tm_tfiar); + + pptr->flag = 1; + FAIL_IF(stop_trace(child)); + + return TEST_PASS; +} + +int ptrace_tm_spr(void) +{ + pid_t pid; + int ret, status; + + SKIP_IF(!have_htm()); + shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT); + shm_id1 = shmget(IPC_PRIVATE, sizeof(int), 0777|IPC_CREAT); + pid = fork(); + if (pid < 0) { + perror("fork() failed"); + return TEST_FAIL; + } + + if (pid == 0) + tm_spr(); + + if (pid) { + pptr = (struct shared *)shmat(shm_id, NULL, 0); + pptr1 = (int *)shmat(shm_id1, NULL, 0); + + while (!pptr1[0]) + asm volatile("" : : : "memory"); + ret = trace_tm_spr(pid); + if (ret) { + kill(pid, SIGKILL); + shmdt((void *)pptr); + shmdt((void *)pptr1); + shmctl(shm_id, IPC_RMID, NULL); + shmctl(shm_id1, IPC_RMID, NULL); + return TEST_FAIL; + } + + shmdt((void *)pptr); + shmdt((void *)pptr1); + ret = wait(&status); + shmctl(shm_id, IPC_RMID, NULL); + shmctl(shm_id1, IPC_RMID, NULL); + if (ret != pid) { + printf("Child's exit status not captured\n"); + return TEST_FAIL; + } + + return (WIFEXITED(status) && WEXITSTATUS(status)) ? 
TEST_FAIL : + TEST_PASS; + } + return TEST_PASS; +} + +int main(int argc, char *argv[]) +{ + return test_harness(ptrace_tm_spr, "ptrace_tm_spr"); +} diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h index 9aa0498ac738..19fb825270a1 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace.h +++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h @@ -38,6 +38,11 @@ struct fpr_regs { unsigned long fpscr; }; +struct tm_spr_regs { + unsigned long tm_tfhar; + unsigned long tm_texasr; + unsigned long tm_tfiar; +}; #ifndef NT_PPC_TAR #define NT_PPC_TAR 0x103 @@ -605,6 +610,36 @@ int write_vsx_ckpt(pid_t child, unsigned long *vsx) return TEST_PASS; } +/* TM SPR */ +int show_tm_spr(pid_t child, struct tm_spr_regs *out) +{ + struct tm_spr_regs *regs; + struct iovec iov; + int ret; + + regs = (struct tm_spr_regs *) malloc(sizeof(struct tm_spr_regs)); + if (!regs) { + perror("malloc() failed"); + return TEST_FAIL; + } + + iov.iov_base = (u64 *) regs; + iov.iov_len = sizeof(struct tm_spr_regs); + + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_SPR, &iov); + if (ret) { + perror("ptrace(PTRACE_GETREGSET) failed"); + return TEST_FAIL; + } + + if (out) + memcpy(out, regs, sizeof(struct tm_spr_regs)); + + return TEST_PASS; +} + + + /* Analyse TEXASR after TM failure */ inline unsigned long get_tfiar(void) { -- cgit From 555c16328ae6d75a90e234eac9b51998d68f185b Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 9 Nov 2016 16:36:33 +1100 Subject: powerpc/mm: Correct process and partition table max size Version 3.00 of the ISA states that the PATS (partition table size) field of the PTCR (partition table control register) and the PRTS (process table size) field of the partition table entry must both be less than or equal to 24. However the actual size of the partition and process tables is equal to 2 to the power of 12 plus the PATS and PRTS fields, respectively. This means that the max allowable size of each of these tables is 2^36 or 64GB for both. Thus when checking the size shift for each we should be checking for values of greater than 36 instead of the current check for shifts larger than 24 and 23. Fixes: 2bfd65e45e877fb5704730244da67c748d28a1b8 Signed-off-by: Suraj Jitindar Singh Reviewed-by: Balbir Singh Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 525c36ebbc8d..e93e38455d59 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -159,7 +159,7 @@ redo: * Allocate Partition table and process table for the * host. */ - BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large."); + BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large."); process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); /* * Fill in the process table. 
@@ -181,7 +181,7 @@ static void __init radix_init_partition_table(void) rts_field = radix__get_tree_size(); - BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large."); + BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large."); partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT); partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR); -- cgit From c05f69a3dc702312bbecae4c7ddfc999f48ee720 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 21 Sep 2016 16:49:27 +0200 Subject: powerpc/64: get rid of MIN_HUGEPTE_SHIFT MIN_HUGEPTE_SHIFT hasn't been used since commit d1837cba5d5d5 ("powerpc/mm: Cleanup initialization of hugepages on powerpc") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hash-4k.h | 3 --- arch/powerpc/include/asm/book3s/64/hash-64k.h | 3 --- arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3 --- arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 3 --- 4 files changed, 12 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 1af837c561ba..1c64bc6330bc 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -16,9 +16,6 @@ #define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE) #define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE) -/* With 4k base page size, hugepage PTEs go at the PMD level */ -#define MIN_HUGEPTE_SHIFT PMD_SHIFT - /* PTE flags to conserve for HPTE identification */ #define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \ H_PAGE_F_SECOND | H_PAGE_F_GIX) diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 5aae4f530c21..f3dd21efa2ea 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -6,9 +6,6 @@ #define H_PUD_INDEX_SIZE 5 #define H_PGD_INDEX_SIZE 12 -/* With 4k base page size, hugepage PTEs go at the PMD level */ -#define MIN_HUGEPTE_SHIFT PAGE_SHIFT - #define H_PAGE_COMBO 0x00001000 /* this is a combo 4k page */ #define H_PAGE_4K_PFN 0x00002000 /* PFN is for a single 4k page */ /* diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index fc7d51753f81..d0db98793dd8 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -27,9 +27,6 @@ #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -/* With 4k base page size, hugepage PTEs go at the PMD level */ -#define MIN_HUGEPTE_SHIFT PMD_SHIFT - /* PUD_SHIFT determines what a third-level page table entry can map */ #define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) #define PUD_SIZE (1UL << PUD_SHIFT) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 908324574f77..55b28ef3409a 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -31,9 +31,6 @@ #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) -/* With 4k base page size, hugepage PTEs go at the PMD level */ -#define MIN_HUGEPTE_SHIFT PAGE_SHIFT - /* PMD_SHIFT determines what a second-level page table entry can map */ #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) #define PMD_SIZE (1UL << PMD_SHIFT) -- cgit From 60d862e531f310a32e272528ad4e041970543d02 Mon Sep 17 
00:00:00 2001 From: Tobias Klauser Date: Thu, 17 Nov 2016 17:20:24 +0100 Subject: powerpc: Fix old style declaration GCC warnings Fix two [-Wold-style-declaration] GCC warnings by moving the inline keyword before the return type. Signed-off-by: Tobias Klauser Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 88ac964f4858..05d2556ebb9f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -461,14 +461,14 @@ static int __init prom_next_node(phandle *nodep) } } -static int inline prom_getprop(phandle node, const char *pname, +static inline int prom_getprop(phandle node, const char *pname, void *value, size_t valuelen) { return call_prom("getprop", 4, 1, node, ADDR(pname), (u32)(unsigned long) value, (u32) valuelen); } -static int inline prom_getproplen(phandle node, const char *pname) +static inline int prom_getproplen(phandle node, const char *pname) { return call_prom("getproplen", 2, 1, node, ADDR(pname)); } -- cgit From d6d56ec738dcac4ac7716ccb7a08a4637de47955 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 17 Nov 2016 17:19:21 +0100 Subject: powerpc/mce: Remove unused but set variable Remove the unused but set variable srr1 in save_mce_event() to fix the following GCC warning when building with 'W=1': arch/powerpc/kernel/mce.c:75:11: warning: variable 'srr1' set but not used It has never been used. Signed-off-by: Tobias Klauser Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/mce.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 5e7ece0fda9f..c6923ff45131 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -72,7 +72,6 @@ void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr) { - uint64_t srr1; int index = __this_cpu_inc_return(mce_nest_count) - 1; struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); @@ -99,8 +98,6 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; mce->severity = MCE_SEV_ERROR_SYNC; - srr1 = regs->msr; - /* * Populate the mce error_type and type-specific error_type. */ -- cgit From 25b587fba9a4198e7ea2e2319d62e219d1a4ca2c Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Thu, 17 Nov 2016 11:38:10 -0500 Subject: powerpc/pseries: Correct possible read beyond dlpar sysfs buffer The pasrsing of data written to the dlpar file in sysfs does not correctly account for the possibility of reading past the end of the buffer. The code assumes that all pieces of the command witten to the sysfs file are present in the form " ". Correct this by updating the buffer parsing code to make a local copy and use the strsep() and sysfs_streq() routines to parse the buffer. This patch also separates the parsing code into subroutines for each piece of the command. 
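For reference, the request is a single space-separated line naming the resource, the action, the id type and the id (the placeholder tokens are missing from the quoted form above). A hedged user-space sketch follows; the /sys/kernel/dlpar path is an assumption based on the pseries DLPAR sysfs class rather than something stated in this commit, and the token spellings mirror dlpar_parse_resource(), dlpar_parse_action() and dlpar_parse_id_type() below:

	/* Sketch: request hot-adding one LMB of memory through the DLPAR sysfs file. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/dlpar", O_WRONLY);

		if (fd < 0 || write(fd, "memory add count 1", 18) < 0) {
			perror("dlpar request failed");
			return 1;
		}
		close(fd);
		return 0;
	}

Both sysfs_streq() and kstrtou32() tolerate a trailing newline, so the same string written with echo(1) is accepted as well.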
Signed-off-by: Nathan Fontenot Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/dlpar.c | 142 ++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 45 deletions(-) diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 423e450efe07..76caa4a45ccd 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -418,84 +418,136 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, } } -static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, - const char *buf, size_t count) +static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog) { - struct pseries_hp_errorlog *hp_elog; - struct completion hotplug_done; - const char *arg; - int rc; + char *arg; - hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL); - if (!hp_elog) { - rc = -ENOMEM; - goto dlpar_store_out; - } + arg = strsep(cmd, " "); + if (!arg) + return -EINVAL; - /* Parse out the request from the user, this will be in the form - * - */ - arg = buf; - if (!strncmp(arg, "memory", 6)) { + if (sysfs_streq(arg, "memory")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; - arg += strlen("memory "); - } else if (!strncmp(arg, "cpu", 3)) { + } else if (sysfs_streq(arg, "cpu")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; - arg += strlen("cpu "); } else { - pr_err("Invalid resource specified: \"%s\"\n", buf); - rc = -EINVAL; - goto dlpar_store_out; + pr_err("Invalid resource specified.\n"); + return -EINVAL; } - if (!strncmp(arg, "add", 3)) { + return 0; +} + +static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog) +{ + char *arg; + + arg = strsep(cmd, " "); + if (!arg) + return -EINVAL; + + if (sysfs_streq(arg, "add")) { hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD; - arg += strlen("add "); - } else if (!strncmp(arg, "remove", 6)) { + } else if (sysfs_streq(arg, "remove")) { hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE; - arg += strlen("remove "); } else { - pr_err("Invalid action specified: \"%s\"\n", buf); - rc = -EINVAL; - goto dlpar_store_out; + pr_err("Invalid action specified.\n"); + return -EINVAL; } - if (!strncmp(arg, "index", 5)) { - u32 index; + return 0; +} +static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog) +{ + char *arg; + u32 count, index; + + arg = strsep(cmd, " "); + if (!arg) + return -EINVAL; + + if (sysfs_streq(arg, "index")) { hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; - arg += strlen("index "); + arg = strsep(cmd, " "); + if (!arg) { + pr_err("No DRC Index specified.\n"); + return -EINVAL; + } + if (kstrtou32(arg, 0, &index)) { - rc = -EINVAL; - pr_err("Invalid drc_index specified: \"%s\"\n", buf); - goto dlpar_store_out; + pr_err("Invalid DRC Index specified.\n"); + return -EINVAL; } hp_elog->_drc_u.drc_index = cpu_to_be32(index); - } else if (!strncmp(arg, "count", 5)) { - u32 count; - + } else if (sysfs_streq(arg, "count")) { hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT; - arg += strlen("count "); + arg = strsep(cmd, " "); + if (!arg) { + pr_err("No DRC count specified.\n"); + return -EINVAL; + } + if (kstrtou32(arg, 0, &count)) { - rc = -EINVAL; - pr_err("Invalid count specified: \"%s\"\n", buf); - goto dlpar_store_out; + pr_err("Invalid DRC count specified.\n"); + return -EINVAL; } hp_elog->_drc_u.drc_count = cpu_to_be32(count); } else { - pr_err("Invalid id_type specified: \"%s\"\n", buf); - rc = -EINVAL; - goto dlpar_store_out; + pr_err("Invalid id_type specified.\n"); + return 
-EINVAL; + } + + return 0; +} + +static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + struct pseries_hp_errorlog *hp_elog; + struct completion hotplug_done; + char *argbuf; + char *args; + int rc; + + args = argbuf = kstrdup(buf, GFP_KERNEL); + hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL); + if (!hp_elog || !argbuf) { + pr_info("Could not allocate resources for DLPAR operation\n"); + kfree(argbuf); + kfree(hp_elog); + return -ENOMEM; } + /* + * Parse out the request from the user, this will be in the form: + * + */ + rc = dlpar_parse_resource(&args, hp_elog); + if (rc) + goto dlpar_store_out; + + rc = dlpar_parse_action(&args, hp_elog); + if (rc) + goto dlpar_store_out; + + rc = dlpar_parse_id_type(&args, hp_elog); + if (rc) + goto dlpar_store_out; + init_completion(&hotplug_done); queue_hotplug_event(hp_elog, &hotplug_done, &rc); wait_for_completion(&hotplug_done); dlpar_store_out: + kfree(argbuf); kfree(hp_elog); + + if (rc) + pr_err("Could not handle DLPAR request \"%s\"\n", buf); + return rc ? rc : count; } -- cgit From 5e9d0e3d9ea6f771fa9fc9dbb65b54c865702850 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 18 Nov 2016 11:51:14 +1100 Subject: powerpc/lib: Fix randconfig build failure in sstep.c Under some configs we need to explicitly include cpu_has_feature.h, otherwise we fail with: arch/powerpc/lib/sstep.c:1992:7: error: implicit declaration of function 'cpu_has_feature' Signed-off-by: Michael Ellerman --- arch/powerpc/lib/sstep.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index b64287c6793f..9c78a9c102c3 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -15,6 +15,7 @@ #include #include #include +#include #include extern char system_call_common[]; -- cgit From 43c9127d94d62a232ed33ed2eab8a08657ce5472 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 19 Oct 2016 14:15:59 +1100 Subject: powerpc: Add option to use thin archives Add an option to use thin archives to build the kernel. Thin archives are explained in commit a5967db9af51 ("kbuild: allow architectures to use thin archives instead of ld -r"). This is a gradual way to introduce the option to testers. Some change to the way we invoke ar is required so it can be used by scripts/link-vmlinux.sh. Signed-off-by: Stephen Rothwell Signed-off-by: Nicholas Piggin [mpe: Make it an explicit option not dependant on COMPILE_TEST] Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 8 ++++++++ arch/powerpc/Makefile | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b49062b060d2..5ee3dbeab323 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -396,6 +396,14 @@ config MPROFILE_KERNEL depends on PPC64 && CPU_LITTLE_ENDIAN def_bool !DISABLE_MPROFILE_KERNEL +config USE_THIN_ARCHIVES + bool "Build the kernel using thin archives" + default n + select THIN_ARCHIVES + help + Build the kernel using thin archives. + If you're unsure say N. 
+ config IOMMU_HELPER def_bool PPC64 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 041fda1e2a5d..0ee7be32b112 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -23,7 +23,7 @@ CROSS32AR := $(CROSS32_COMPILE)ar ifeq ($(HAS_BIARCH),y) ifeq ($(CROSS32_COMPILE),) CROSS32CC := $(CC) -m32 -CROSS32AR := GNUTARGET=elf32-powerpc $(AR) +KBUILD_ARFLAGS += --target=elf32-powerpc endif endif @@ -85,7 +85,7 @@ ifeq ($(HAS_BIARCH),y) override AS += -a$(BITS) override LD += -m elf$(BITS)$(LDEMULATION) override CC += -m$(BITS) -override AR := GNUTARGET=elf$(BITS)-$(GNUTARGET) $(AR) +KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET) endif LDFLAGS_vmlinux-y := -Bstatic -- cgit From 6f44b20ee9b4130345c189c0c90ef6604bcd8005 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 8 Nov 2016 23:14:44 +1100 Subject: powerpc: Fix graceful debugger recovery When exiting xmon with 'x' (exit and recover), oops_begin bails out immediately, but die then calls __die() and oops_end(), which cause a lot of bad things to happen. If the debugger was attached then went to graceful recovery, exit from die() immediately. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/traps.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 91d278c9ab28..13ca8b687e88 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -122,9 +122,6 @@ static unsigned long oops_begin(struct pt_regs *regs) int cpu; unsigned long flags; - if (debugger(regs)) - return 1; - oops_enter(); /* racy, but better than risking deadlock. */ @@ -227,8 +224,12 @@ NOKPROBE_SYMBOL(__die); void die(const char *str, struct pt_regs *regs, long err) { - unsigned long flags = oops_begin(regs); + unsigned long flags; + + if (debugger(regs)) + return; + flags = oops_begin(regs); if (__die(str, regs, err)) err = 0; oops_end(flags, regs, err); -- cgit From 7458e8b2ce62e65051fec0bcb14cac35d4f1118d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 8 Nov 2016 23:14:45 +1100 Subject: powerpc: Fix second nested oops hang When ending an oops, don't clear die_owner unless the nest count went to zero. This prevents a second nested oops from hanging forever on the die_lock. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/traps.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 13ca8b687e88..3eb20d16c67c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -147,14 +147,15 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { bust_spinlocks(0); - die_owner = -1; add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); die_nest_count--; oops_exit(); printk("\n"); - if (!die_nest_count) + if (!die_nest_count) { /* Nest count reaches zero, release the lock. */ + die_owner = -1; arch_spin_unlock(&die_lock); + } raw_local_irq_restore(flags); crash_fadump(regs, "die oops"); -- cgit From 5cd4f5cec2374d2c09299211d2b357cc160e49a0 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 30 Oct 2016 20:35:57 +0100 Subject: cxl: Fix memory allocation failure test 'cxl_context_alloc()' does not return an error pointer. It is just a shortcut for a call to 'kzalloc' with 'sizeof(struct cxl_context)' as the size parameter. So its return value should be compared with NULL. While fixing it, simplify a bit the code. 
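To make the distinction the fix relies on concrete, here is a small self-contained userspace sketch of the two return conventions. ERR_PTR()/PTR_ERR()/IS_ERR() are re-implemented here in simplified form for illustration (the real definitions live in the kernel headers), and alloc_ctx()/init_dev() are hypothetical names, not cxl functions:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Simplified userspace stand-ins for the kernel's error-pointer idiom. */
static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Allocator in the kzalloc() style: returns NULL on failure, never an error pointer. */
static void *alloc_ctx(size_t size)
{
	return calloc(1, size);
}

/* Init-style function in the ERR_PTR() style: encodes an errno value in the pointer. */
static void *init_dev(int fail)
{
	void *ctx = alloc_ctx(64);

	if (!ctx)			/* correct check for a kzalloc-style allocator */
		return ERR_PTR(-ENOMEM);
	if (fail) {
		free(ctx);
		return ERR_PTR(-ENODEV);
	}
	return ctx;
}

int main(void)
{
	void *ok = init_dev(0);
	void *bad = init_dev(1);

	if (IS_ERR(ok))			/* correct check for an ERR_PTR-style function */
		return EXIT_FAILURE;
	printf("init_dev(0) succeeded\n");
	free(ok);

	if (IS_ERR(bad))
		printf("init_dev(1) failed with %ld\n", PTR_ERR(bad));

	return EXIT_SUCCESS;
}

Checking a NULL-returning allocator with IS_ERR(), as the old code did, silently treats an allocation failure as success, which is exactly what the patch corrects.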
Signed-off-by: Christophe JAILLET Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Acked-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/api.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 2e5233b60971..2b88ad8a2a89 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -30,10 +30,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) return ERR_CAST(afu); ctx = cxl_context_alloc(); - if (IS_ERR(ctx)) { - rc = PTR_ERR(ctx); - goto err_dev; - } + if (!ctx) + return ERR_PTR(-ENOMEM); ctx->kernelapi = true; @@ -61,7 +59,6 @@ err_mapping: kfree(mapping); err_ctx: kfree(ctx); -err_dev: return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(cxl_dev_context_init); -- cgit From 28e323e5a0b21b3665eb6e136ca8bbce10fd29d7 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 30 Oct 2016 22:34:51 +0100 Subject: cxl: Fix error handling in _cxl_cx4_setup_msi_irqs() 'cxl_dev_context_init()' returns an error pointer in case of error, not NULL. So test it with IS_ERR. Signed-off-by: Christophe JAILLET Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Acked-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 2b88ad8a2a89..e2efc6489c6e 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -538,7 +538,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) if (remaining > 0) { new_ctx = cxl_dev_context_init(pdev); - if (!new_ctx) { + if (IS_ERR(new_ctx)) { pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev)); return -ENOSPC; } -- cgit From bb81733de28c99e10b61dcaff1592142abe53442 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 30 Oct 2016 22:40:47 +0100 Subject: cxl: Fix error handling in _cxl_pci_associate_default_context() 'cxl_dev_context_init()' returns an error pointer in case of error, not NULL. So test it with IS_ERR. Signed-off-by: Christophe JAILLET Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Acked-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 2 +- drivers/misc/cxl/phb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index e96be9ca4e60..80a87ab25b83 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) goto err; ctx = cxl_dev_context_init(afu_dev); - if (!ctx) + if (IS_ERR(ctx)) goto err; afu_dev->dev.archdata.cxl_ctx = ctx; diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c index 0935d44c1770..6ec69ada19f4 100644 --- a/drivers/misc/cxl/phb.c +++ b/drivers/misc/cxl/phb.c @@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu * in the virtual phb, we'll need a default context to attach them to. */ ctx = cxl_dev_context_init(dev); - if (!ctx) + if (IS_ERR(ctx)) return false; dev->dev.archdata.cxl_ctx = ctx; -- cgit From abf051be684be768c1ee079514f4d07de9389d54 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Wed, 16 Nov 2016 19:39:33 +0530 Subject: cxl: Do adapter fence check before handling afu interrupt If an afu interrupt is in flight when an eeh error is triggered the control still reaches the function native_irq_multiplexed and the PE-Handle read from the CXL_PSL_PEHandle_An register is 0xffff. 
The function then erroneously assumes that the interrupt belonged to a detached context and generates a warning with a full stack dump in the kernel log complaining: "Unable to demultiplex CXL PSL IRQ for PE 65535 DSISR ffffffff DAR ffffffff. (Possible AFU HW issue - was a term/remove acked with outstanding transactions" To fix this, the patch adds new code to native_irq_multiplexed() to compare the value read from the CXL_PSL_PEHandle_An register with ~0ULL. If they match, it logs a warning message saying that the interrupt is being ignored and returns IRQ_HANDLED from the irq handler. Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Acked-by: Ian Munsie Signed-off-by: Vaibhav Jain Signed-off-by: Michael Ellerman --- drivers/misc/cxl/native.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index a217a74ccc98..c336350ede94 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -931,9 +931,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data) struct cxl_afu *afu = data; struct cxl_context *ctx; struct cxl_irq_info irq_info; - int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; - int ret; - + u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An); + int ph, ret; + + /* check if eeh kicked in while the interrupt was in flight */ + if (unlikely(phreg == ~0ULL)) { + dev_warn(&afu->dev, + "Ignoring slice interrupt(%d) due to fenced card", + irq); + return IRQ_HANDLED; + } + /* Mask the pe-handle from register value */ + ph = phreg & 0xffff; if ((ret = native_get_irq_info(afu, &irq_info))) { WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); return fail_psl_irq(afu, &irq_info); -- cgit From bdecf76e319a29735d828575f4a9269f0e17c547 Mon Sep 17 00:00:00 2001 From: Frederic Barrat Date: Fri, 18 Nov 2016 23:00:31 +1100 Subject: cxl: Fix coredump generation when cxl_get_fd() is used If a process dumps core while owning a cxl file descriptor obtained from an AFU driver (e.g. cxlflash) through the cxl_get_fd() API, the following error occurs: [ 868.027591] Unable to handle kernel paging request for data at address ... [ 868.027778] Faulting instruction address: 0xc00000000035edb0 cpu 0x8c: Vector: 300 (Data Access) at [c000003c688275e0] pc: c00000000035edb0: elf_core_dump+0xd60/0x1300 lr: c00000000035ed80: elf_core_dump+0xd30/0x1300 sp: c000003c68827860 msr: 9000000100009033 dar: c dsisr: 40000000 current = 0xc000003c68780000 paca = 0xc000000001b73200 softe: 0 irq_happened: 0x01 pid = 46725, comm = hxesurelock enter ? for help [c000003c68827a60] c00000000036948c do_coredump+0xcec/0x11e0 [c000003c68827c20] c0000000000ce9e0 get_signal+0x540/0x7b0 [c000003c68827d10] c000000000017354 do_signal+0x54/0x2b0 [c000003c68827e00] c00000000001777c do_notify_resume+0xbc/0xd0 [c000003c68827e30] c000000000009838 ret_from_except_lite+0x64/0x68 --- Exception: 300 (Data Access) at 00003fff98ad2918 The root cause is that the address_space structure for the file doesn't define a 'host' member. When cxl allocates a file descriptor, it's using the anonymous inode to back the file, but allocates a private address_space for each context. The private address_space allows memory allocation to be tracked for each context. cxl doesn't define the 'host' member of the address space, i.e. the inode. We don't want to define it as the anonymous inode, since there's no longer a 1-to-1 relation between address_space and inode.
To fix it, instead of using the anonymous inode, we introduce a simple pseudo filesystem so that cxl can allocate its own inodes. So we now have one inode for each file and address_space. The pseudo filesystem is only mounted on the first allocation of a file descriptor by cxl_get_fd(). Tested with cxlflash. Signed-off-by: Frederic Barrat Reviewed-by: Matthew R. Ochs Signed-off-by: Michael Ellerman --- drivers/misc/cxl/api.c | 138 +++++++++++++++++++++++++++++++++++++-------- drivers/misc/cxl/context.c | 17 ++++-- drivers/misc/cxl/cxl.h | 6 +- drivers/misc/cxl/file.c | 5 +- 4 files changed, 135 insertions(+), 31 deletions(-) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index e2efc6489c6e..1b35e33d2434 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -9,18 +9,119 @@ #include #include -#include #include #include -#include #include #include +#include +#include #include "cxl.h" +/* + * Since we want to track memory mappings to be able to force-unmap + * when the AFU is no longer reachable, we need an inode. For devices + * opened through the cxl user API, this is not a problem, but a + * userland process can also get a cxl fd through the cxl_get_fd() + * API, which is used by the cxlflash driver. + * + * Therefore we implement our own simple pseudo-filesystem and inode + * allocator. We don't use the anonymous inode, as we need the + * meta-data associated with it (address_space) and it is shared by + * other drivers/processes, so it could lead to cxl unmapping VMAs + * from random processes. + */ + +#define CXL_PSEUDO_FS_MAGIC 0x1697697f + +static int cxl_fs_cnt; +static struct vfsmount *cxl_vfs_mount; + +static const struct dentry_operations cxl_fs_dops = { + .d_dname = simple_dname, +}; + +static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops, + CXL_PSEUDO_FS_MAGIC); +} + +static struct file_system_type cxl_fs_type = { + .name = "cxl", + .owner = THIS_MODULE, + .mount = cxl_fs_mount, + .kill_sb = kill_anon_super, +}; + + +void cxl_release_mapping(struct cxl_context *ctx) +{ + if (ctx->kernelapi && ctx->mapping) + simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); +} + +static struct file *cxl_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags) +{ + struct qstr this; + struct path path; + struct file *file; + struct inode *inode = NULL; + int rc; + + /* strongly inspired by anon_inode_getfile() */ + + if (fops->owner && !try_module_get(fops->owner)) + return ERR_PTR(-ENOENT); + + rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt); + if (rc < 0) { + pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc); + file = ERR_PTR(rc); + goto err_module; + } + + inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb); + if (IS_ERR(inode)) { + file = ERR_CAST(inode); + goto err_fs; + } + + file = ERR_PTR(-ENOMEM); + this.name = name; + this.len = strlen(name); + this.hash = 0; + path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this); + if (!path.dentry) + goto err_inode; + + path.mnt = mntget(cxl_vfs_mount); + d_instantiate(path.dentry, inode); + + file = alloc_file(&path, OPEN_FMODE(flags), fops); + if (IS_ERR(file)) + goto err_dput; + file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); + file->private_data = priv; + + return file; + +err_dput: + path_put(&path); +err_inode: + iput(inode); +err_fs: + simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); +err_module: + module_put(fops->owner); + return file; +} + 
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) { - struct address_space *mapping; struct cxl_afu *afu; struct cxl_context *ctx; int rc; @@ -35,28 +136,13 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) ctx->kernelapi = true; - /* - * Make our own address space since we won't have one from the - * filesystem like the user api has, and even if we do associate a file - * with this context we don't want to use the global anonymous inode's - * address space as that can invalidate unrelated users: - */ - mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL); - if (!mapping) { - rc = -ENOMEM; - goto err_ctx; - } - address_space_init_once(mapping); - /* Make it a slave context. We can promote it later? */ - rc = cxl_context_init(ctx, afu, false, mapping); + rc = cxl_context_init(ctx, afu, false); if (rc) - goto err_mapping; + goto err_ctx; return ctx; -err_mapping: - kfree(mapping); err_ctx: kfree(ctx); return ERR_PTR(rc); @@ -337,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops, { struct file *file; int rc, flags, fdtmp; + char *name = NULL; + + /* only allow one per context */ + if (ctx->mapping) + return ERR_PTR(-EEXIST); flags = O_RDWR | O_CLOEXEC; @@ -360,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops, } else /* use default ops */ fops = (struct file_operations *)&afu_fops; - file = anon_inode_getfile("cxl", fops, ctx, flags); + name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe); + file = cxl_getfile(name, fops, ctx, flags); + kfree(name); if (IS_ERR(file)) goto err_fd; - file->f_mapping = ctx->mapping; - + cxl_context_set_mapping(ctx, file->f_mapping); *fd = fdtmp; return file; diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 5e506c19108a..ff5e7e8cb1d1 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void) /* * Initialises a CXL context. 
*/ -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, - struct address_space *mapping) +int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) { int i; @@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, ctx->master = master; ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */ mutex_init(&ctx->mapping_lock); - ctx->mapping = mapping; + ctx->mapping = NULL; /* * Allocate the segment table before we put it in the IDR so that we @@ -114,6 +113,14 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, return 0; } +void cxl_context_set_mapping(struct cxl_context *ctx, + struct address_space *mapping) +{ + mutex_lock(&ctx->mapping_lock); + ctx->mapping = mapping; + mutex_unlock(&ctx->mapping_lock); +} + static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct cxl_context *ctx = vma->vm_file->private_data; @@ -300,8 +307,6 @@ static void reclaim_ctx(struct rcu_head *rcu) if (ctx->ff_page) __free_page(ctx->ff_page); ctx->sstp = NULL; - if (ctx->kernelapi) - kfree(ctx->mapping); kfree(ctx->irq_bitmap); @@ -313,6 +318,8 @@ static void reclaim_ctx(struct rcu_head *rcu) void cxl_context_free(struct cxl_context *ctx) { + if (ctx->kernelapi && ctx->mapping) + cxl_release_mapping(ctx); mutex_lock(&ctx->afu->contexts_lock); idr_remove(&ctx->afu->contexts_idr, ctx->pe); mutex_unlock(&ctx->afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index a144073593fa..b24d76723fb0 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size); void init_cxl_native(void); struct cxl_context *cxl_context_alloc(void); -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, - struct address_space *mapping); +int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master); +void cxl_context_set_mapping(struct cxl_context *ctx, + struct address_space *mapping); void cxl_context_free(struct cxl_context *ctx); int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma); unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, @@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter); void cxl_stop_trace(struct cxl *cxl); int cxl_pci_vphb_add(struct cxl_afu *afu); void cxl_pci_vphb_remove(struct cxl_afu *afu); +void cxl_release_mapping(struct cxl_context *ctx); extern struct pci_driver cxl_pci_driver; extern struct platform_driver cxl_of_driver; diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 77080cc5fa0a..859959f19f10 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master) goto err_put_afu; } - if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping))) + rc = cxl_context_init(ctx, afu, master); + if (rc) goto err_put_afu; + cxl_context_set_mapping(ctx, inode->i_mapping); + pr_devel("afu_open pe: %i\n", ctx->pe); file->private_data = ctx; cxl_ctx_get(); -- cgit From 139ac5afe30bcad3e0a8064a2fac608f015aa166 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 15 Nov 2016 14:47:42 +1100 Subject: powerpc/pseries: Move vio.c into platforms pseries vio.c is pseries only code, so move it in there. 
Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/Makefile | 1 - arch/powerpc/kernel/vio.c | 1705 ------------------------------- arch/powerpc/platforms/Kconfig | 5 - arch/powerpc/platforms/pseries/Kconfig | 5 + arch/powerpc/platforms/pseries/Makefile | 1 + arch/powerpc/platforms/pseries/vio.c | 1705 +++++++++++++++++++++++++++++++ 6 files changed, 1711 insertions(+), 1711 deletions(-) delete mode 100644 arch/powerpc/kernel/vio.c create mode 100644 arch/powerpc/platforms/pseries/vio.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1925341dbb9c..9c57ebf61e4d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -58,7 +58,6 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y) obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o obj-$(CONFIG_RTAS_PROC) += rtas-proc.o -obj-$(CONFIG_IBMVIO) += vio.o obj-$(CONFIG_IBMEBUS) += ibmebus.o obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ eeh_driver.o eeh_event.o eeh_sysfs.o diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c deleted file mode 100644 index 2c8fb3ec989e..000000000000 --- a/arch/powerpc/kernel/vio.c +++ /dev/null @@ -1,1705 +0,0 @@ -/* - * IBM PowerPC Virtual I/O Infrastructure Support. - * - * Copyright (c) 2003,2008 IBM Corp. - * Dave Engebretsen engebret@us.ibm.com - * Santiago Leon santil@us.ibm.com - * Hollis Blanchard - * Stephen Rothwell - * Robert Jennings - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -static struct vio_dev vio_bus_device = { /* fake "parent" device */ - .name = "vio", - .type = "", - .dev.init_name = "vio", - .dev.bus = &vio_bus_type, -}; - -#ifdef CONFIG_PPC_SMLPAR -/** - * vio_cmo_pool - A pool of IO memory for CMO use - * - * @size: The size of the pool in bytes - * @free: The amount of free memory in the pool - */ -struct vio_cmo_pool { - size_t size; - size_t free; -}; - -/* How many ms to delay queued balance work */ -#define VIO_CMO_BALANCE_DELAY 100 - -/* Portion out IO memory to CMO devices by this chunk size */ -#define VIO_CMO_BALANCE_CHUNK 131072 - -/** - * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement - * - * @vio_dev: struct vio_dev pointer - * @list: pointer to other devices on bus that are being tracked - */ -struct vio_cmo_dev_entry { - struct vio_dev *viodev; - struct list_head list; -}; - -/** - * vio_cmo - VIO bus accounting structure for CMO entitlement - * - * @lock: spinlock for entire structure - * @balance_q: work queue for balancing system entitlement - * @device_list: list of CMO-enabled devices requiring entitlement - * @entitled: total system entitlement in bytes - * @reserve: pool of memory from which devices reserve entitlement, incl. 
spare - * @excess: pool of excess entitlement not needed for device reserves or spare - * @spare: IO memory for device hotplug functionality - * @min: minimum necessary for system operation - * @desired: desired memory for system operation - * @curr: bytes currently allocated - * @high: high water mark for IO data usage - */ -static struct vio_cmo { - spinlock_t lock; - struct delayed_work balance_q; - struct list_head device_list; - size_t entitled; - struct vio_cmo_pool reserve; - struct vio_cmo_pool excess; - size_t spare; - size_t min; - size_t desired; - size_t curr; - size_t high; -} vio_cmo; - -/** - * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows - */ -static int vio_cmo_num_OF_devs(void) -{ - struct device_node *node_vroot; - int count = 0; - - /* - * Count the number of vdevice entries with an - * ibm,my-dma-window OF property - */ - node_vroot = of_find_node_by_name(NULL, "vdevice"); - if (node_vroot) { - struct device_node *of_node; - struct property *prop; - - for_each_child_of_node(node_vroot, of_node) { - prop = of_find_property(of_node, "ibm,my-dma-window", - NULL); - if (prop) - count++; - } - } - of_node_put(node_vroot); - return count; -} - -/** - * vio_cmo_alloc - allocate IO memory for CMO-enable devices - * - * @viodev: VIO device requesting IO memory - * @size: size of allocation requested - * - * Allocations come from memory reserved for the devices and any excess - * IO memory available to all devices. The spare pool used to service - * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be - * made available. - * - * Return codes: - * 0 for successful allocation and -ENOMEM for a failure - */ -static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) -{ - unsigned long flags; - size_t reserve_free = 0; - size_t excess_free = 0; - int ret = -ENOMEM; - - spin_lock_irqsave(&vio_cmo.lock, flags); - - /* Determine the amount of free entitlement available in reserve */ - if (viodev->cmo.entitled > viodev->cmo.allocated) - reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; - - /* If spare is not fulfilled, the excess pool can not be used. */ - if (vio_cmo.spare >= VIO_CMO_MIN_ENT) - excess_free = vio_cmo.excess.free; - - /* The request can be satisfied */ - if ((reserve_free + excess_free) >= size) { - vio_cmo.curr += size; - if (vio_cmo.curr > vio_cmo.high) - vio_cmo.high = vio_cmo.curr; - viodev->cmo.allocated += size; - size -= min(reserve_free, size); - vio_cmo.excess.free -= size; - ret = 0; - } - - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return ret; -} - -/** - * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices - * @viodev: VIO device freeing IO memory - * @size: size of deallocation - * - * IO memory is freed by the device back to the correct memory pools. - * The spare pool is replenished first from either memory pool, then - * the reserve pool is used to reduce device entitlement, the excess - * pool is used to increase the reserve pool toward the desired entitlement - * target, and then the remaining memory is returned to the pools. 
- * - */ -static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) -{ - unsigned long flags; - size_t spare_needed = 0; - size_t excess_freed = 0; - size_t reserve_freed = size; - size_t tmp; - int balance = 0; - - spin_lock_irqsave(&vio_cmo.lock, flags); - vio_cmo.curr -= size; - - /* Amount of memory freed from the excess pool */ - if (viodev->cmo.allocated > viodev->cmo.entitled) { - excess_freed = min(reserve_freed, (viodev->cmo.allocated - - viodev->cmo.entitled)); - reserve_freed -= excess_freed; - } - - /* Remove allocation from device */ - viodev->cmo.allocated -= (reserve_freed + excess_freed); - - /* Spare is a subset of the reserve pool, replenish it first. */ - spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare; - - /* - * Replenish the spare in the reserve pool from the excess pool. - * This moves entitlement into the reserve pool. - */ - if (spare_needed && excess_freed) { - tmp = min(excess_freed, spare_needed); - vio_cmo.excess.size -= tmp; - vio_cmo.reserve.size += tmp; - vio_cmo.spare += tmp; - excess_freed -= tmp; - spare_needed -= tmp; - balance = 1; - } - - /* - * Replenish the spare in the reserve pool from the reserve pool. - * This removes entitlement from the device down to VIO_CMO_MIN_ENT, - * if needed, and gives it to the spare pool. The amount of used - * memory in this pool does not change. - */ - if (spare_needed && reserve_freed) { - tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); - - vio_cmo.spare += tmp; - viodev->cmo.entitled -= tmp; - reserve_freed -= tmp; - spare_needed -= tmp; - balance = 1; - } - - /* - * Increase the reserve pool until the desired allocation is met. - * Move an allocation freed from the excess pool into the reserve - * pool and schedule a balance operation. - */ - if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { - tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); - - vio_cmo.excess.size -= tmp; - vio_cmo.reserve.size += tmp; - excess_freed -= tmp; - balance = 1; - } - - /* Return memory from the excess pool to that pool */ - if (excess_freed) - vio_cmo.excess.free += excess_freed; - - if (balance) - schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY); - spin_unlock_irqrestore(&vio_cmo.lock, flags); -} - -/** - * vio_cmo_entitlement_update - Manage system entitlement changes - * - * @new_entitlement: new system entitlement to attempt to accommodate - * - * Increases in entitlement will be used to fulfill the spare entitlement - * and the rest is given to the excess pool. 
Decreases, if they are - * possible, come from the excess pool and from unused device entitlement - * - * Returns: 0 on success, -ENOMEM when change can not be made - */ -int vio_cmo_entitlement_update(size_t new_entitlement) -{ - struct vio_dev *viodev; - struct vio_cmo_dev_entry *dev_ent; - unsigned long flags; - size_t avail, delta, tmp; - - spin_lock_irqsave(&vio_cmo.lock, flags); - - /* Entitlement increases */ - if (new_entitlement > vio_cmo.entitled) { - delta = new_entitlement - vio_cmo.entitled; - - /* Fulfill spare allocation */ - if (vio_cmo.spare < VIO_CMO_MIN_ENT) { - tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare)); - vio_cmo.spare += tmp; - vio_cmo.reserve.size += tmp; - delta -= tmp; - } - - /* Remaining new allocation goes to the excess pool */ - vio_cmo.entitled += delta; - vio_cmo.excess.size += delta; - vio_cmo.excess.free += delta; - - goto out; - } - - /* Entitlement decreases */ - delta = vio_cmo.entitled - new_entitlement; - avail = vio_cmo.excess.free; - - /* - * Need to check how much unused entitlement each device can - * sacrifice to fulfill entitlement change. - */ - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { - if (avail >= delta) - break; - - viodev = dev_ent->viodev; - if ((viodev->cmo.entitled > viodev->cmo.allocated) && - (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) - avail += viodev->cmo.entitled - - max_t(size_t, viodev->cmo.allocated, - VIO_CMO_MIN_ENT); - } - - if (delta <= avail) { - vio_cmo.entitled -= delta; - - /* Take entitlement from the excess pool first */ - tmp = min(vio_cmo.excess.free, delta); - vio_cmo.excess.size -= tmp; - vio_cmo.excess.free -= tmp; - delta -= tmp; - - /* - * Remove all but VIO_CMO_MIN_ENT bytes from devices - * until entitlement change is served - */ - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { - if (!delta) - break; - - viodev = dev_ent->viodev; - tmp = 0; - if ((viodev->cmo.entitled > viodev->cmo.allocated) && - (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) - tmp = viodev->cmo.entitled - - max_t(size_t, viodev->cmo.allocated, - VIO_CMO_MIN_ENT); - viodev->cmo.entitled -= min(tmp, delta); - delta -= min(tmp, delta); - } - } else { - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return -ENOMEM; - } - -out: - schedule_delayed_work(&vio_cmo.balance_q, 0); - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return 0; -} - -/** - * vio_cmo_balance - Balance entitlement among devices - * - * @work: work queue structure for this operation - * - * Any system entitlement above the minimum needed for devices, or - * already allocated to devices, can be distributed to the devices. - * The list of devices is iterated through to recalculate the desired - * entitlement level and to determine how much entitlement above the - * minimum entitlement is allocated to devices. - * - * Small chunks of the available entitlement are given to devices until - * their requirements are fulfilled or there is no entitlement left to give. - * Upon completion sizes of the reserve and excess pools are calculated. - * - * The system minimum entitlement level is also recalculated here. - * Entitlement will be reserved for devices even after vio_bus_remove to - * accommodate reloading the driver. The OF tree is walked to count the - * number of devices present and this will remove entitlement for devices - * that have actually left the system after having vio_bus_remove called. 
- */ -static void vio_cmo_balance(struct work_struct *work) -{ - struct vio_cmo *cmo; - struct vio_dev *viodev; - struct vio_cmo_dev_entry *dev_ent; - unsigned long flags; - size_t avail = 0, level, chunk, need; - int devcount = 0, fulfilled; - - cmo = container_of(work, struct vio_cmo, balance_q.work); - - spin_lock_irqsave(&vio_cmo.lock, flags); - - /* Calculate minimum entitlement and fulfill spare */ - cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; - BUG_ON(cmo->min > cmo->entitled); - cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); - cmo->min += cmo->spare; - cmo->desired = cmo->min; - - /* - * Determine how much entitlement is available and reset device - * entitlements - */ - avail = cmo->entitled - cmo->spare; - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { - viodev = dev_ent->viodev; - devcount++; - viodev->cmo.entitled = VIO_CMO_MIN_ENT; - cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); - avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); - } - - /* - * Having provided each device with the minimum entitlement, loop - * over the devices portioning out the remaining entitlement - * until there is nothing left. - */ - level = VIO_CMO_MIN_ENT; - while (avail) { - fulfilled = 0; - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { - viodev = dev_ent->viodev; - - if (viodev->cmo.desired <= level) { - fulfilled++; - continue; - } - - /* - * Give the device up to VIO_CMO_BALANCE_CHUNK - * bytes of entitlement, but do not exceed the - * desired level of entitlement for the device. - */ - chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK); - chunk = min(chunk, (viodev->cmo.desired - - viodev->cmo.entitled)); - viodev->cmo.entitled += chunk; - - /* - * If the memory for this entitlement increase was - * already allocated to the device it does not come - * from the available pool being portioned out. 
- */ - need = max(viodev->cmo.allocated, viodev->cmo.entitled)- - max(viodev->cmo.allocated, level); - avail -= need; - - } - if (fulfilled == devcount) - break; - level += VIO_CMO_BALANCE_CHUNK; - } - - /* Calculate new reserve and excess pool sizes */ - cmo->reserve.size = cmo->min; - cmo->excess.free = 0; - cmo->excess.size = 0; - need = 0; - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { - viodev = dev_ent->viodev; - /* Calculated reserve size above the minimum entitlement */ - if (viodev->cmo.entitled) - cmo->reserve.size += (viodev->cmo.entitled - - VIO_CMO_MIN_ENT); - /* Calculated used excess entitlement */ - if (viodev->cmo.allocated > viodev->cmo.entitled) - need += viodev->cmo.allocated - viodev->cmo.entitled; - } - cmo->excess.size = cmo->entitled - cmo->reserve.size; - cmo->excess.free = cmo->excess.size - need; - - cancel_delayed_work(to_delayed_work(work)); - spin_unlock_irqrestore(&vio_cmo.lock, flags); -} - -static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - void *ret; - - if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { - atomic_inc(&viodev->cmo.allocs_failed); - return NULL; - } - - ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); - if (unlikely(ret == NULL)) { - vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); - atomic_inc(&viodev->cmo.allocs_failed); - } - - return ret; -} - -static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - - dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); - - vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); -} - -static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; - dma_addr_t ret = DMA_ERROR_CODE; - - tbl = get_iommu_table_base(dev); - if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { - atomic_inc(&viodev->cmo.allocs_failed); - return ret; - } - - ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); - if (unlikely(dma_mapping_error(dev, ret))) { - vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); - atomic_inc(&viodev->cmo.allocs_failed); - } - - return ret; -} - -static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; - - tbl = get_iommu_table_base(dev); - dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); - - vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); -} - -static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; - struct scatterlist *sgl; - int ret, count; - size_t alloc_size = 0; - - tbl = get_iommu_table_base(dev); - for_each_sg(sglist, sgl, nelems, count) - alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); - - if (vio_cmo_alloc(viodev, alloc_size)) { - atomic_inc(&viodev->cmo.allocs_failed); - return 0; - } - - ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs); - - if (unlikely(!ret)) { - 
vio_cmo_dealloc(viodev, alloc_size); - atomic_inc(&viodev->cmo.allocs_failed); - return ret; - } - - for_each_sg(sglist, sgl, ret, count) - alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); - if (alloc_size) - vio_cmo_dealloc(viodev, alloc_size); - - return ret; -} - -static void vio_dma_iommu_unmap_sg(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction direction, - unsigned long attrs) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; - struct scatterlist *sgl; - size_t alloc_size = 0; - int count; - - tbl = get_iommu_table_base(dev); - for_each_sg(sglist, sgl, nelems, count) - alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); - - dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); - - vio_cmo_dealloc(viodev, alloc_size); -} - -static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) -{ - return dma_iommu_ops.dma_supported(dev, mask); -} - -static u64 vio_dma_get_required_mask(struct device *dev) -{ - return dma_iommu_ops.get_required_mask(dev); -} - -static struct dma_map_ops vio_dma_mapping_ops = { - .alloc = vio_dma_iommu_alloc_coherent, - .free = vio_dma_iommu_free_coherent, - .mmap = dma_direct_mmap_coherent, - .map_sg = vio_dma_iommu_map_sg, - .unmap_sg = vio_dma_iommu_unmap_sg, - .map_page = vio_dma_iommu_map_page, - .unmap_page = vio_dma_iommu_unmap_page, - .dma_supported = vio_dma_iommu_dma_supported, - .get_required_mask = vio_dma_get_required_mask, -}; - -/** - * vio_cmo_set_dev_desired - Set desired entitlement for a device - * - * @viodev: struct vio_dev for device to alter - * @desired: new desired entitlement level in bytes - * - * For use by devices to request a change to their entitlement at runtime or - * through sysfs. The desired entitlement level is changed and a balancing - * of system resources is scheduled to run in the future. - */ -void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) -{ - unsigned long flags; - struct vio_cmo_dev_entry *dev_ent; - int found = 0; - - if (!firmware_has_feature(FW_FEATURE_CMO)) - return; - - spin_lock_irqsave(&vio_cmo.lock, flags); - if (desired < VIO_CMO_MIN_ENT) - desired = VIO_CMO_MIN_ENT; - - /* - * Changes will not be made for devices not in the device list. - * If it is not in the device list, then no driver is loaded - * for the device and it can not receive entitlement. - */ - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) - if (viodev == dev_ent->viodev) { - found = 1; - break; - } - if (!found) { - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return; - } - - /* Increase/decrease in desired device entitlement */ - if (desired >= viodev->cmo.desired) { - /* Just bump the bus and device values prior to a balance*/ - vio_cmo.desired += desired - viodev->cmo.desired; - viodev->cmo.desired = desired; - } else { - /* Decrease bus and device values for desired entitlement */ - vio_cmo.desired -= viodev->cmo.desired - desired; - viodev->cmo.desired = desired; - /* - * If less entitlement is desired than current entitlement, move - * any reserve memory in the change region to the excess pool. - */ - if (viodev->cmo.entitled > desired) { - vio_cmo.reserve.size -= viodev->cmo.entitled - desired; - vio_cmo.excess.size += viodev->cmo.entitled - desired; - /* - * If entitlement moving from the reserve pool to the - * excess pool is currently unused, add to the excess - * free counter. 
- */ - if (viodev->cmo.allocated < viodev->cmo.entitled) - vio_cmo.excess.free += viodev->cmo.entitled - - max(viodev->cmo.allocated, desired); - viodev->cmo.entitled = desired; - } - } - schedule_delayed_work(&vio_cmo.balance_q, 0); - spin_unlock_irqrestore(&vio_cmo.lock, flags); -} - -/** - * vio_cmo_bus_probe - Handle CMO specific bus probe activities - * - * @viodev - Pointer to struct vio_dev for device - * - * Determine the devices IO memory entitlement needs, attempting - * to satisfy the system minimum entitlement at first and scheduling - * a balance operation to take care of the rest at a later time. - * - * Returns: 0 on success, -EINVAL when device doesn't support CMO, and - * -ENOMEM when entitlement is not available for device or - * device entry. - * - */ -static int vio_cmo_bus_probe(struct vio_dev *viodev) -{ - struct vio_cmo_dev_entry *dev_ent; - struct device *dev = &viodev->dev; - struct iommu_table *tbl; - struct vio_driver *viodrv = to_vio_driver(dev->driver); - unsigned long flags; - size_t size; - bool dma_capable = false; - - tbl = get_iommu_table_base(dev); - - /* A device requires entitlement if it has a DMA window property */ - switch (viodev->family) { - case VDEVICE: - if (of_get_property(viodev->dev.of_node, - "ibm,my-dma-window", NULL)) - dma_capable = true; - break; - case PFO: - dma_capable = false; - break; - default: - dev_warn(dev, "unknown device family: %d\n", viodev->family); - BUG(); - break; - } - - /* Configure entitlement for the device. */ - if (dma_capable) { - /* Check that the driver is CMO enabled and get desired DMA */ - if (!viodrv->get_desired_dma) { - dev_err(dev, "%s: device driver does not support CMO\n", - __func__); - return -EINVAL; - } - - viodev->cmo.desired = - IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); - if (viodev->cmo.desired < VIO_CMO_MIN_ENT) - viodev->cmo.desired = VIO_CMO_MIN_ENT; - size = VIO_CMO_MIN_ENT; - - dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry), - GFP_KERNEL); - if (!dev_ent) - return -ENOMEM; - - dev_ent->viodev = viodev; - spin_lock_irqsave(&vio_cmo.lock, flags); - list_add(&dev_ent->list, &vio_cmo.device_list); - } else { - viodev->cmo.desired = 0; - size = 0; - spin_lock_irqsave(&vio_cmo.lock, flags); - } - - /* - * If the needs for vio_cmo.min have not changed since they - * were last set, the number of devices in the OF tree has - * been constant and the IO memory for this is already in - * the reserve pool. - */ - if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * - VIO_CMO_MIN_ENT)) { - /* Updated desired entitlement if device requires it */ - if (size) - vio_cmo.desired += (viodev->cmo.desired - - VIO_CMO_MIN_ENT); - } else { - size_t tmp; - - tmp = vio_cmo.spare + vio_cmo.excess.free; - if (tmp < size) { - dev_err(dev, "%s: insufficient free " - "entitlement to add device. 
" - "Need %lu, have %lu\n", __func__, - size, (vio_cmo.spare + tmp)); - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return -ENOMEM; - } - - /* Use excess pool first to fulfill request */ - tmp = min(size, vio_cmo.excess.free); - vio_cmo.excess.free -= tmp; - vio_cmo.excess.size -= tmp; - vio_cmo.reserve.size += tmp; - - /* Use spare if excess pool was insufficient */ - vio_cmo.spare -= size - tmp; - - /* Update bus accounting */ - vio_cmo.min += size; - vio_cmo.desired += viodev->cmo.desired; - } - spin_unlock_irqrestore(&vio_cmo.lock, flags); - return 0; -} - -/** - * vio_cmo_bus_remove - Handle CMO specific bus removal activities - * - * @viodev - Pointer to struct vio_dev for device - * - * Remove the device from the cmo device list. The minimum entitlement - * will be reserved for the device as long as it is in the system. The - * rest of the entitlement the device had been allocated will be returned - * to the system. - */ -static void vio_cmo_bus_remove(struct vio_dev *viodev) -{ - struct vio_cmo_dev_entry *dev_ent; - unsigned long flags; - size_t tmp; - - spin_lock_irqsave(&vio_cmo.lock, flags); - if (viodev->cmo.allocated) { - dev_err(&viodev->dev, "%s: device had %lu bytes of IO " - "allocated after remove operation.\n", - __func__, viodev->cmo.allocated); - BUG(); - } - - /* - * Remove the device from the device list being maintained for - * CMO enabled devices. - */ - list_for_each_entry(dev_ent, &vio_cmo.device_list, list) - if (viodev == dev_ent->viodev) { - list_del(&dev_ent->list); - kfree(dev_ent); - break; - } - - /* - * Devices may not require any entitlement and they do not need - * to be processed. Otherwise, return the device's entitlement - * back to the pools. - */ - if (viodev->cmo.entitled) { - /* - * This device has not yet left the OF tree, it's - * minimum entitlement remains in vio_cmo.min and - * vio_cmo.desired - */ - vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); - - /* - * Save min allocation for device in reserve as long - * as it exists in OF tree as determined by later - * balance operation - */ - viodev->cmo.entitled -= VIO_CMO_MIN_ENT; - - /* Replenish spare from freed reserve pool */ - if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { - tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - - vio_cmo.spare)); - vio_cmo.spare += tmp; - viodev->cmo.entitled -= tmp; - } - - /* Remaining reserve goes to excess pool */ - vio_cmo.excess.size += viodev->cmo.entitled; - vio_cmo.excess.free += viodev->cmo.entitled; - vio_cmo.reserve.size -= viodev->cmo.entitled; - - /* - * Until the device is removed it will keep a - * minimum entitlement; this will guarantee that - * a module unload/load will result in a success. - */ - viodev->cmo.entitled = VIO_CMO_MIN_ENT; - viodev->cmo.desired = VIO_CMO_MIN_ENT; - atomic_set(&viodev->cmo.allocs_failed, 0); - } - - spin_unlock_irqrestore(&vio_cmo.lock, flags); -} - -static void vio_cmo_set_dma_ops(struct vio_dev *viodev) -{ - set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); -} - -/** - * vio_cmo_bus_init - CMO entitlement initialization at bus init time - * - * Set up the reserve and excess entitlement pools based on available - * system entitlement and the number of devices in the OF tree that - * require entitlement in the reserve pool. 
- */ -static void vio_cmo_bus_init(void) -{ - struct hvcall_mpp_data mpp_data; - int err; - - memset(&vio_cmo, 0, sizeof(struct vio_cmo)); - spin_lock_init(&vio_cmo.lock); - INIT_LIST_HEAD(&vio_cmo.device_list); - INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance); - - /* Get current system entitlement */ - err = h_get_mpp(&mpp_data); - - /* - * On failure, continue with entitlement set to 0, will panic() - * later when spare is reserved. - */ - if (err != H_SUCCESS) { - printk(KERN_ERR "%s: unable to determine system IO "\ - "entitlement. (%d)\n", __func__, err); - vio_cmo.entitled = 0; - } else { - vio_cmo.entitled = mpp_data.entitled_mem; - } - - /* Set reservation and check against entitlement */ - vio_cmo.spare = VIO_CMO_MIN_ENT; - vio_cmo.reserve.size = vio_cmo.spare; - vio_cmo.reserve.size += (vio_cmo_num_OF_devs() * - VIO_CMO_MIN_ENT); - if (vio_cmo.reserve.size > vio_cmo.entitled) { - printk(KERN_ERR "%s: insufficient system entitlement\n", - __func__); - panic("%s: Insufficient system entitlement", __func__); - } - - /* Set the remaining accounting variables */ - vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size; - vio_cmo.excess.free = vio_cmo.excess.size; - vio_cmo.min = vio_cmo.reserve.size; - vio_cmo.desired = vio_cmo.reserve.size; -} - -/* sysfs device functions and data structures for CMO */ - -#define viodev_cmo_rd_attr(name) \ -static ssize_t viodev_cmo_##name##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \ -} - -static ssize_t viodev_cmo_allocs_failed_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct vio_dev *viodev = to_vio_dev(dev); - return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); -} - -static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct vio_dev *viodev = to_vio_dev(dev); - atomic_set(&viodev->cmo.allocs_failed, 0); - return count; -} - -static ssize_t viodev_cmo_desired_set(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct vio_dev *viodev = to_vio_dev(dev); - size_t new_desired; - int ret; - - ret = kstrtoul(buf, 10, &new_desired); - if (ret) - return ret; - - vio_cmo_set_dev_desired(viodev, new_desired); - return count; -} - -viodev_cmo_rd_attr(desired); -viodev_cmo_rd_attr(entitled); -viodev_cmo_rd_attr(allocated); - -static ssize_t name_show(struct device *, struct device_attribute *, char *); -static ssize_t devspec_show(struct device *, struct device_attribute *, char *); -static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, - char *buf); -static struct device_attribute vio_cmo_dev_attrs[] = { - __ATTR_RO(name), - __ATTR_RO(devspec), - __ATTR_RO(modalias), - __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, - viodev_cmo_desired_show, viodev_cmo_desired_set), - __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), - __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL), - __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, - viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset), - __ATTR_NULL -}; - -/* sysfs bus functions and data structures for CMO */ - -#define viobus_cmo_rd_attr(name) \ -static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \ -{ \ - return sprintf(buf, "%lu\n", vio_cmo.name); \ -} \ -static BUS_ATTR_RO(cmo_##name) - -#define viobus_cmo_pool_rd_attr(name, 
var) \ -static ssize_t \ -cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \ -{ \ - return sprintf(buf, "%lu\n", vio_cmo.name.var); \ -} \ -static BUS_ATTR_RO(cmo_##name##_##var) - -viobus_cmo_rd_attr(entitled); -viobus_cmo_rd_attr(spare); -viobus_cmo_rd_attr(min); -viobus_cmo_rd_attr(desired); -viobus_cmo_rd_attr(curr); -viobus_cmo_pool_rd_attr(reserve, size); -viobus_cmo_pool_rd_attr(excess, size); -viobus_cmo_pool_rd_attr(excess, free); - -static ssize_t cmo_high_show(struct bus_type *bt, char *buf) -{ - return sprintf(buf, "%lu\n", vio_cmo.high); -} - -static ssize_t cmo_high_store(struct bus_type *bt, const char *buf, - size_t count) -{ - unsigned long flags; - - spin_lock_irqsave(&vio_cmo.lock, flags); - vio_cmo.high = vio_cmo.curr; - spin_unlock_irqrestore(&vio_cmo.lock, flags); - - return count; -} -static BUS_ATTR_RW(cmo_high); - -static struct attribute *vio_bus_attrs[] = { - &bus_attr_cmo_entitled.attr, - &bus_attr_cmo_spare.attr, - &bus_attr_cmo_min.attr, - &bus_attr_cmo_desired.attr, - &bus_attr_cmo_curr.attr, - &bus_attr_cmo_high.attr, - &bus_attr_cmo_reserve_size.attr, - &bus_attr_cmo_excess_size.attr, - &bus_attr_cmo_excess_free.attr, - NULL, -}; -ATTRIBUTE_GROUPS(vio_bus); - -static void vio_cmo_sysfs_init(void) -{ - vio_bus_type.dev_attrs = vio_cmo_dev_attrs; - vio_bus_type.bus_groups = vio_bus_groups; -} -#else /* CONFIG_PPC_SMLPAR */ -int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } -void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} -static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } -static void vio_cmo_bus_remove(struct vio_dev *viodev) {} -static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} -static void vio_cmo_bus_init(void) {} -static void vio_cmo_sysfs_init(void) { } -#endif /* CONFIG_PPC_SMLPAR */ -EXPORT_SYMBOL(vio_cmo_entitlement_update); -EXPORT_SYMBOL(vio_cmo_set_dev_desired); - - -/* - * Platform Facilities Option (PFO) support - */ - -/** - * vio_h_cop_sync - Perform a synchronous PFO co-processor operation - * - * @vdev - Pointer to a struct vio_dev for device - * @op - Pointer to a struct vio_pfo_op for the operation parameters - * - * Calls the hypervisor to synchronously perform the PFO operation - * described in @op. In the case of a busy response from the hypervisor, - * the operation will be re-submitted indefinitely unless a non-zero timeout - * is specified or an error occurs. The timeout places a limit on when to - * stop re-submitting a operation, the total time can be exceeded if an - * operation is in progress. - * - * If op->hcall_ret is not NULL, this will be set to the return from the - * last h_cop_op call or it will be 0 if an error not involving the h_call - * was encountered. 
- * - * Returns: - * 0 on success, - * -EINVAL if the h_call fails due to an invalid parameter, - * -E2BIG if the h_call can not be performed synchronously, - * -EBUSY if a timeout is specified and has elapsed, - * -EACCES if the memory area for data/status has been rescinded, or - * -EPERM if a hardware fault has been indicated - */ -int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op) -{ - struct device *dev = &vdev->dev; - unsigned long deadline = 0; - long hret = 0; - int ret = 0; - - if (op->timeout) - deadline = jiffies + msecs_to_jiffies(op->timeout); - - while (true) { - hret = plpar_hcall_norets(H_COP, op->flags, - vdev->resource_id, - op->in, op->inlen, op->out, - op->outlen, op->csbcpb); - - if (hret == H_SUCCESS || - (hret != H_NOT_ENOUGH_RESOURCES && - hret != H_BUSY && hret != H_RESOURCE) || - (op->timeout && time_after(deadline, jiffies))) - break; - - dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret); - } - - switch (hret) { - case H_SUCCESS: - ret = 0; - break; - case H_OP_MODE: - case H_TOO_BIG: - ret = -E2BIG; - break; - case H_RESCINDED: - ret = -EACCES; - break; - case H_HARDWARE: - ret = -EPERM; - break; - case H_NOT_ENOUGH_RESOURCES: - case H_RESOURCE: - case H_BUSY: - ret = -EBUSY; - break; - default: - ret = -EINVAL; - break; - } - - if (ret) - dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n", - __func__, ret, hret); - - op->hcall_err = hret; - return ret; -} -EXPORT_SYMBOL(vio_h_cop_sync); - -static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) -{ - const __be32 *dma_window; - struct iommu_table *tbl; - unsigned long offset, size; - - dma_window = of_get_property(dev->dev.of_node, - "ibm,my-dma-window", NULL); - if (!dma_window) - return NULL; - - tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); - if (tbl == NULL) - return NULL; - - of_parse_dma_window(dev->dev.of_node, dma_window, - &tbl->it_index, &offset, &size); - - /* TCE table size - measured in tce entries */ - tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; - tbl->it_size = size >> tbl->it_page_shift; - /* offset for VIO should always be 0 */ - tbl->it_offset = offset >> tbl->it_page_shift; - tbl->it_busno = 0; - tbl->it_type = TCE_VB; - tbl->it_blocksize = 16; - - if (firmware_has_feature(FW_FEATURE_LPAR)) - tbl->it_ops = &iommu_table_lpar_multi_ops; - else - tbl->it_ops = &iommu_table_pseries_ops; - - return iommu_init_table(tbl, -1); -} - -/** - * vio_match_device: - Tell if a VIO device has a matching - * VIO device id structure. - * @ids: array of VIO device id structures to search in - * @dev: the VIO device structure to match against - * - * Used by a driver to check whether a VIO device present in the - * system is in its list of supported devices. Returns the matching - * vio_device_id structure or NULL if there is no match. - */ -static const struct vio_device_id *vio_match_device( - const struct vio_device_id *ids, const struct vio_dev *dev) -{ - while (ids->type[0] != '\0') { - if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && - of_device_is_compatible(dev->dev.of_node, - ids->compat)) - return ids; - ids++; - } - return NULL; -} - -/* - * Convert from struct device to struct vio_dev and pass to driver. - * dev->driver has already been set by generic code because vio_bus_match - * succeeded. 
- */ -static int vio_bus_probe(struct device *dev) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct vio_driver *viodrv = to_vio_driver(dev->driver); - const struct vio_device_id *id; - int error = -ENODEV; - - if (!viodrv->probe) - return error; - - id = vio_match_device(viodrv->id_table, viodev); - if (id) { - memset(&viodev->cmo, 0, sizeof(viodev->cmo)); - if (firmware_has_feature(FW_FEATURE_CMO)) { - error = vio_cmo_bus_probe(viodev); - if (error) - return error; - } - error = viodrv->probe(viodev, id); - if (error && firmware_has_feature(FW_FEATURE_CMO)) - vio_cmo_bus_remove(viodev); - } - - return error; -} - -/* convert from struct device to struct vio_dev and pass to driver. */ -static int vio_bus_remove(struct device *dev) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct vio_driver *viodrv = to_vio_driver(dev->driver); - struct device *devptr; - int ret = 1; - - /* - * Hold a reference to the device after the remove function is called - * to allow for CMO accounting cleanup for the device. - */ - devptr = get_device(dev); - - if (viodrv->remove) - ret = viodrv->remove(viodev); - - if (!ret && firmware_has_feature(FW_FEATURE_CMO)) - vio_cmo_bus_remove(viodev); - - put_device(devptr); - return ret; -} - -/** - * vio_register_driver: - Register a new vio driver - * @viodrv: The vio_driver structure to be registered. - */ -int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, - const char *mod_name) -{ - pr_debug("%s: driver %s registering\n", __func__, viodrv->name); - - /* fill in 'struct driver' fields */ - viodrv->driver.name = viodrv->name; - viodrv->driver.pm = viodrv->pm; - viodrv->driver.bus = &vio_bus_type; - viodrv->driver.owner = owner; - viodrv->driver.mod_name = mod_name; - - return driver_register(&viodrv->driver); -} -EXPORT_SYMBOL(__vio_register_driver); - -/** - * vio_unregister_driver - Remove registration of vio driver. - * @viodrv: The vio_driver struct to be removed form registration - */ -void vio_unregister_driver(struct vio_driver *viodrv) -{ - driver_unregister(&viodrv->driver); -} -EXPORT_SYMBOL(vio_unregister_driver); - -/* vio_dev refcount hit 0 */ -static void vio_dev_release(struct device *dev) -{ - struct iommu_table *tbl = get_iommu_table_base(dev); - - if (tbl) - iommu_free_table(tbl, of_node_full_name(dev->of_node)); - of_node_put(dev->of_node); - kfree(to_vio_dev(dev)); -} - -/** - * vio_register_device_node: - Register a new vio device. - * @of_node: The OF node for this device. - * - * Creates and initializes a vio_dev structure from the data in - * of_node and adds it to the list of virtual devices. - * Returns a pointer to the created vio_dev or NULL if node has - * NULL device_type or compatible fields. - */ -struct vio_dev *vio_register_device_node(struct device_node *of_node) -{ - struct vio_dev *viodev; - struct device_node *parent_node; - const __be32 *prop; - enum vio_dev_family family; - const char *of_node_name = of_node->name ? of_node->name : ""; - - /* - * Determine if this node is a under the /vdevice node or under the - * /ibm,platform-facilities node. This decides the device's family. 
- */ - parent_node = of_get_parent(of_node); - if (parent_node) { - if (!strcmp(parent_node->full_name, "/ibm,platform-facilities")) - family = PFO; - else if (!strcmp(parent_node->full_name, "/vdevice")) - family = VDEVICE; - else { - pr_warn("%s: parent(%s) of %s not recognized.\n", - __func__, - parent_node->full_name, - of_node_name); - of_node_put(parent_node); - return NULL; - } - of_node_put(parent_node); - } else { - pr_warn("%s: could not determine the parent of node %s.\n", - __func__, of_node_name); - return NULL; - } - - if (family == PFO) { - if (of_get_property(of_node, "interrupt-controller", NULL)) { - pr_debug("%s: Skipping the interrupt controller %s.\n", - __func__, of_node_name); - return NULL; - } - } - - /* allocate a vio_dev for this node */ - viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); - if (viodev == NULL) { - pr_warn("%s: allocation failure for VIO device.\n", __func__); - return NULL; - } - - /* we need the 'device_type' property, in order to match with drivers */ - viodev->family = family; - if (viodev->family == VDEVICE) { - unsigned int unit_address; - - if (of_node->type != NULL) - viodev->type = of_node->type; - else { - pr_warn("%s: node %s is missing the 'device_type' " - "property.\n", __func__, of_node_name); - goto out; - } - - prop = of_get_property(of_node, "reg", NULL); - if (prop == NULL) { - pr_warn("%s: node %s missing 'reg'\n", - __func__, of_node_name); - goto out; - } - unit_address = of_read_number(prop, 1); - dev_set_name(&viodev->dev, "%x", unit_address); - viodev->irq = irq_of_parse_and_map(of_node, 0); - viodev->unit_address = unit_address; - } else { - /* PFO devices need their resource_id for submitting COP_OPs - * This is an optional field for devices, but is required when - * performing synchronous ops */ - prop = of_get_property(of_node, "ibm,resource-id", NULL); - if (prop != NULL) - viodev->resource_id = of_read_number(prop, 1); - - dev_set_name(&viodev->dev, "%s", of_node_name); - viodev->type = of_node_name; - viodev->irq = 0; - } - - viodev->name = of_node->name; - viodev->dev.of_node = of_node_get(of_node); - - set_dev_node(&viodev->dev, of_node_to_nid(of_node)); - - /* init generic 'struct device' fields: */ - viodev->dev.parent = &vio_bus_device.dev; - viodev->dev.bus = &vio_bus_type; - viodev->dev.release = vio_dev_release; - - if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { - if (firmware_has_feature(FW_FEATURE_CMO)) - vio_cmo_set_dma_ops(viodev); - else - set_dma_ops(&viodev->dev, &dma_iommu_ops); - - set_iommu_table_base(&viodev->dev, - vio_build_iommu_table(viodev)); - - /* needed to ensure proper operation of coherent allocations - * later, in case driver doesn't set it explicitly */ - viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); - viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; - } - - /* register with generic device framework */ - if (device_register(&viodev->dev)) { - printk(KERN_ERR "%s: failed to register device %s\n", - __func__, dev_name(&viodev->dev)); - put_device(&viodev->dev); - return NULL; - } - - return viodev; - -out: /* Use this exit point for any return prior to device_register */ - kfree(viodev); - - return NULL; -} -EXPORT_SYMBOL(vio_register_device_node); - -/* - * vio_bus_scan_for_devices - Scan OF and register each child device - * @root_name - OF node name for the root of the subtree to search. - * This must be non-NULL - * - * Starting from the root node provide, register the device node for - * each child beneath the root. 
- */ -static void vio_bus_scan_register_devices(char *root_name) -{ - struct device_node *node_root, *node_child; - - if (!root_name) - return; - - node_root = of_find_node_by_name(NULL, root_name); - if (node_root) { - - /* - * Create struct vio_devices for each virtual device in - * the device tree. Drivers will associate with them later. - */ - node_child = of_get_next_child(node_root, NULL); - while (node_child) { - vio_register_device_node(node_child); - node_child = of_get_next_child(node_root, node_child); - } - of_node_put(node_root); - } -} - -/** - * vio_bus_init: - Initialize the virtual IO bus - */ -static int __init vio_bus_init(void) -{ - int err; - - if (firmware_has_feature(FW_FEATURE_CMO)) - vio_cmo_sysfs_init(); - - err = bus_register(&vio_bus_type); - if (err) { - printk(KERN_ERR "failed to register VIO bus\n"); - return err; - } - - /* - * The fake parent of all vio devices, just to give us - * a nice directory - */ - err = device_register(&vio_bus_device.dev); - if (err) { - printk(KERN_WARNING "%s: device_register returned %i\n", - __func__, err); - return err; - } - - if (firmware_has_feature(FW_FEATURE_CMO)) - vio_cmo_bus_init(); - - return 0; -} -postcore_initcall(vio_bus_init); - -static int __init vio_device_init(void) -{ - vio_bus_scan_register_devices("vdevice"); - vio_bus_scan_register_devices("ibm,platform-facilities"); - - return 0; -} -device_initcall(vio_device_init); - -static ssize_t name_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", to_vio_dev(dev)->name); -} - -static ssize_t devspec_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct device_node *of_node = dev->of_node; - - return sprintf(buf, "%s\n", of_node_full_name(of_node)); -} - -static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - const struct vio_dev *vio_dev = to_vio_dev(dev); - struct device_node *dn; - const char *cp; - - dn = dev->of_node; - if (!dn) { - strcpy(buf, "\n"); - return strlen(buf); - } - cp = of_get_property(dn, "compatible", NULL); - if (!cp) { - strcpy(buf, "\n"); - return strlen(buf); - } - - return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); -} - -static struct device_attribute vio_dev_attrs[] = { - __ATTR_RO(name), - __ATTR_RO(devspec), - __ATTR_RO(modalias), - __ATTR_NULL -}; - -void vio_unregister_device(struct vio_dev *viodev) -{ - device_unregister(&viodev->dev); -} -EXPORT_SYMBOL(vio_unregister_device); - -static int vio_bus_match(struct device *dev, struct device_driver *drv) -{ - const struct vio_dev *vio_dev = to_vio_dev(dev); - struct vio_driver *vio_drv = to_vio_driver(drv); - const struct vio_device_id *ids = vio_drv->id_table; - - return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); -} - -static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) -{ - const struct vio_dev *vio_dev = to_vio_dev(dev); - struct device_node *dn; - const char *cp; - - dn = dev->of_node; - if (!dn) - return -ENODEV; - cp = of_get_property(dn, "compatible", NULL); - if (!cp) - return -ENODEV; - - add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); - return 0; -} - -struct bus_type vio_bus_type = { - .name = "vio", - .dev_attrs = vio_dev_attrs, - .uevent = vio_hotplug, - .match = vio_bus_match, - .probe = vio_bus_probe, - .remove = vio_bus_remove, -}; - -/** - * vio_get_attribute: - get attribute for virtual device - * @vdev: The vio device to get property. - * @which: The property/attribute to be extracted. 
- * @length: Pointer to length of returned data size (unused if NULL). - * - * Calls prom.c's of_get_property() to return the value of the - * attribute specified by @which -*/ -const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) -{ - return of_get_property(vdev->dev.of_node, which, length); -} -EXPORT_SYMBOL(vio_get_attribute); - -#ifdef CONFIG_PPC_PSERIES -/* vio_find_name() - internal because only vio.c knows how we formatted the - * kobject name - */ -static struct vio_dev *vio_find_name(const char *name) -{ - struct device *found; - - found = bus_find_device_by_name(&vio_bus_type, NULL, name); - if (!found) - return NULL; - - return to_vio_dev(found); -} - -/** - * vio_find_node - find an already-registered vio_dev - * @vnode: device_node of the virtual device we're looking for - * - * Takes a reference to the embedded struct device which needs to be dropped - * after use. - */ -struct vio_dev *vio_find_node(struct device_node *vnode) -{ - char kobj_name[20]; - struct device_node *vnode_parent; - const char *dev_type; - - vnode_parent = of_get_parent(vnode); - if (!vnode_parent) - return NULL; - - dev_type = of_get_property(vnode_parent, "device_type", NULL); - of_node_put(vnode_parent); - if (!dev_type) - return NULL; - - /* construct the kobject name from the device node */ - if (!strcmp(dev_type, "vdevice")) { - const __be32 *prop; - - prop = of_get_property(vnode, "reg", NULL); - if (!prop) - return NULL; - snprintf(kobj_name, sizeof(kobj_name), "%x", - (uint32_t)of_read_number(prop, 1)); - } else if (!strcmp(dev_type, "ibm,platform-facilities")) - snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); - else - return NULL; - - return vio_find_name(kobj_name); -} -EXPORT_SYMBOL(vio_find_node); - -int vio_enable_interrupts(struct vio_dev *dev) -{ - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); - if (rc != H_SUCCESS) - printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); - return rc; -} -EXPORT_SYMBOL(vio_enable_interrupts); - -int vio_disable_interrupts(struct vio_dev *dev) -{ - int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); - if (rc != H_SUCCESS) - printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); - return rc; -} -EXPORT_SYMBOL(vio_disable_interrupts); -#endif /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index fbdae8377b71..eae86c35e4c6 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -168,11 +168,6 @@ config MPIC_BROKEN_REGREAD well, but enabling it uses about 8KB of memory to keep copies of the register contents in software. -config IBMVIO - depends on PPC_PSERIES - bool - default y - config IBMEBUS depends on PPC_PSERIES bool "Support for GX bus based adapters" diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index bec90fb30425..f7d78b81951d 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -127,3 +127,8 @@ config HV_PERF_CTRS systems. 24x7 is available on Power 8 systems. If unsure, select Y. 
+ +config IBMVIO + depends on PPC_PSERIES + bool + default y diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index fedc2ccf029d..85ba00233fb0 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_DTL) += dtl.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o +obj-$(CONFIG_IBMVIO) += vio.o ifeq ($(CONFIG_PPC_PSERIES),y) obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c new file mode 100644 index 000000000000..2c8fb3ec989e --- /dev/null +++ b/arch/powerpc/platforms/pseries/vio.c @@ -0,0 +1,1705 @@ +/* + * IBM PowerPC Virtual I/O Infrastructure Support. + * + * Copyright (c) 2003,2008 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Santiago Leon santil@us.ibm.com + * Hollis Blanchard + * Stephen Rothwell + * Robert Jennings + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct vio_dev vio_bus_device = { /* fake "parent" device */ + .name = "vio", + .type = "", + .dev.init_name = "vio", + .dev.bus = &vio_bus_type, +}; + +#ifdef CONFIG_PPC_SMLPAR +/** + * vio_cmo_pool - A pool of IO memory for CMO use + * + * @size: The size of the pool in bytes + * @free: The amount of free memory in the pool + */ +struct vio_cmo_pool { + size_t size; + size_t free; +}; + +/* How many ms to delay queued balance work */ +#define VIO_CMO_BALANCE_DELAY 100 + +/* Portion out IO memory to CMO devices by this chunk size */ +#define VIO_CMO_BALANCE_CHUNK 131072 + +/** + * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement + * + * @vio_dev: struct vio_dev pointer + * @list: pointer to other devices on bus that are being tracked + */ +struct vio_cmo_dev_entry { + struct vio_dev *viodev; + struct list_head list; +}; + +/** + * vio_cmo - VIO bus accounting structure for CMO entitlement + * + * @lock: spinlock for entire structure + * @balance_q: work queue for balancing system entitlement + * @device_list: list of CMO-enabled devices requiring entitlement + * @entitled: total system entitlement in bytes + * @reserve: pool of memory from which devices reserve entitlement, incl. 
spare + * @excess: pool of excess entitlement not needed for device reserves or spare + * @spare: IO memory for device hotplug functionality + * @min: minimum necessary for system operation + * @desired: desired memory for system operation + * @curr: bytes currently allocated + * @high: high water mark for IO data usage + */ +static struct vio_cmo { + spinlock_t lock; + struct delayed_work balance_q; + struct list_head device_list; + size_t entitled; + struct vio_cmo_pool reserve; + struct vio_cmo_pool excess; + size_t spare; + size_t min; + size_t desired; + size_t curr; + size_t high; +} vio_cmo; + +/** + * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows + */ +static int vio_cmo_num_OF_devs(void) +{ + struct device_node *node_vroot; + int count = 0; + + /* + * Count the number of vdevice entries with an + * ibm,my-dma-window OF property + */ + node_vroot = of_find_node_by_name(NULL, "vdevice"); + if (node_vroot) { + struct device_node *of_node; + struct property *prop; + + for_each_child_of_node(node_vroot, of_node) { + prop = of_find_property(of_node, "ibm,my-dma-window", + NULL); + if (prop) + count++; + } + } + of_node_put(node_vroot); + return count; +} + +/** + * vio_cmo_alloc - allocate IO memory for CMO-enable devices + * + * @viodev: VIO device requesting IO memory + * @size: size of allocation requested + * + * Allocations come from memory reserved for the devices and any excess + * IO memory available to all devices. The spare pool used to service + * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be + * made available. + * + * Return codes: + * 0 for successful allocation and -ENOMEM for a failure + */ +static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) +{ + unsigned long flags; + size_t reserve_free = 0; + size_t excess_free = 0; + int ret = -ENOMEM; + + spin_lock_irqsave(&vio_cmo.lock, flags); + + /* Determine the amount of free entitlement available in reserve */ + if (viodev->cmo.entitled > viodev->cmo.allocated) + reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; + + /* If spare is not fulfilled, the excess pool can not be used. */ + if (vio_cmo.spare >= VIO_CMO_MIN_ENT) + excess_free = vio_cmo.excess.free; + + /* The request can be satisfied */ + if ((reserve_free + excess_free) >= size) { + vio_cmo.curr += size; + if (vio_cmo.curr > vio_cmo.high) + vio_cmo.high = vio_cmo.curr; + viodev->cmo.allocated += size; + size -= min(reserve_free, size); + vio_cmo.excess.free -= size; + ret = 0; + } + + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return ret; +} + +/** + * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices + * @viodev: VIO device freeing IO memory + * @size: size of deallocation + * + * IO memory is freed by the device back to the correct memory pools. + * The spare pool is replenished first from either memory pool, then + * the reserve pool is used to reduce device entitlement, the excess + * pool is used to increase the reserve pool toward the desired entitlement + * target, and then the remaining memory is returned to the pools. 
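To make the pool accounting above concrete: vio_cmo_alloc() satisfies a request from the device's unused entitlement (the reserve) first and only charges the remainder to the excess pool, refusing the request outright if the two together cannot cover it. A minimal userspace model of just that bookkeeping, with made-up sizes and none of the kernel's locking, spare-pool gating or high-water tracking:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structures; all sizes are made up. */
struct pool { size_t size, free; };

static struct pool excess = { .size = 65536, .free = 65536 };
static size_t dev_entitled = 16384, dev_allocated;

/* Mirror of the decision in vio_cmo_alloc(): unused device entitlement
 * (the reserve) covers the request first, the excess pool covers the rest. */
static int cmo_alloc(size_t size)
{
        size_t reserve_free = 0;

        if (dev_entitled > dev_allocated)
                reserve_free = dev_entitled - dev_allocated;

        if (reserve_free + excess.free < size)
                return -1;                      /* -ENOMEM in the kernel */

        dev_allocated += size;
        size -= reserve_free < size ? reserve_free : size;
        excess.free -= size;                    /* only the uncovered part */
        return 0;
}

int main(void)
{
        cmo_alloc(20480);       /* 16384 from reserve, 4096 from excess */
        printf("allocated=%zu excess.free=%zu\n", dev_allocated, excess.free);
        return 0;
}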
+ * + */ +static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) +{ + unsigned long flags; + size_t spare_needed = 0; + size_t excess_freed = 0; + size_t reserve_freed = size; + size_t tmp; + int balance = 0; + + spin_lock_irqsave(&vio_cmo.lock, flags); + vio_cmo.curr -= size; + + /* Amount of memory freed from the excess pool */ + if (viodev->cmo.allocated > viodev->cmo.entitled) { + excess_freed = min(reserve_freed, (viodev->cmo.allocated - + viodev->cmo.entitled)); + reserve_freed -= excess_freed; + } + + /* Remove allocation from device */ + viodev->cmo.allocated -= (reserve_freed + excess_freed); + + /* Spare is a subset of the reserve pool, replenish it first. */ + spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare; + + /* + * Replenish the spare in the reserve pool from the excess pool. + * This moves entitlement into the reserve pool. + */ + if (spare_needed && excess_freed) { + tmp = min(excess_freed, spare_needed); + vio_cmo.excess.size -= tmp; + vio_cmo.reserve.size += tmp; + vio_cmo.spare += tmp; + excess_freed -= tmp; + spare_needed -= tmp; + balance = 1; + } + + /* + * Replenish the spare in the reserve pool from the reserve pool. + * This removes entitlement from the device down to VIO_CMO_MIN_ENT, + * if needed, and gives it to the spare pool. The amount of used + * memory in this pool does not change. + */ + if (spare_needed && reserve_freed) { + tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); + + vio_cmo.spare += tmp; + viodev->cmo.entitled -= tmp; + reserve_freed -= tmp; + spare_needed -= tmp; + balance = 1; + } + + /* + * Increase the reserve pool until the desired allocation is met. + * Move an allocation freed from the excess pool into the reserve + * pool and schedule a balance operation. + */ + if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { + tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); + + vio_cmo.excess.size -= tmp; + vio_cmo.reserve.size += tmp; + excess_freed -= tmp; + balance = 1; + } + + /* Return memory from the excess pool to that pool */ + if (excess_freed) + vio_cmo.excess.free += excess_freed; + + if (balance) + schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY); + spin_unlock_irqrestore(&vio_cmo.lock, flags); +} + +/** + * vio_cmo_entitlement_update - Manage system entitlement changes + * + * @new_entitlement: new system entitlement to attempt to accommodate + * + * Increases in entitlement will be used to fulfill the spare entitlement + * and the rest is given to the excess pool. 
Decreases, if they are + * possible, come from the excess pool and from unused device entitlement + * + * Returns: 0 on success, -ENOMEM when change can not be made + */ +int vio_cmo_entitlement_update(size_t new_entitlement) +{ + struct vio_dev *viodev; + struct vio_cmo_dev_entry *dev_ent; + unsigned long flags; + size_t avail, delta, tmp; + + spin_lock_irqsave(&vio_cmo.lock, flags); + + /* Entitlement increases */ + if (new_entitlement > vio_cmo.entitled) { + delta = new_entitlement - vio_cmo.entitled; + + /* Fulfill spare allocation */ + if (vio_cmo.spare < VIO_CMO_MIN_ENT) { + tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare)); + vio_cmo.spare += tmp; + vio_cmo.reserve.size += tmp; + delta -= tmp; + } + + /* Remaining new allocation goes to the excess pool */ + vio_cmo.entitled += delta; + vio_cmo.excess.size += delta; + vio_cmo.excess.free += delta; + + goto out; + } + + /* Entitlement decreases */ + delta = vio_cmo.entitled - new_entitlement; + avail = vio_cmo.excess.free; + + /* + * Need to check how much unused entitlement each device can + * sacrifice to fulfill entitlement change. + */ + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { + if (avail >= delta) + break; + + viodev = dev_ent->viodev; + if ((viodev->cmo.entitled > viodev->cmo.allocated) && + (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) + avail += viodev->cmo.entitled - + max_t(size_t, viodev->cmo.allocated, + VIO_CMO_MIN_ENT); + } + + if (delta <= avail) { + vio_cmo.entitled -= delta; + + /* Take entitlement from the excess pool first */ + tmp = min(vio_cmo.excess.free, delta); + vio_cmo.excess.size -= tmp; + vio_cmo.excess.free -= tmp; + delta -= tmp; + + /* + * Remove all but VIO_CMO_MIN_ENT bytes from devices + * until entitlement change is served + */ + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { + if (!delta) + break; + + viodev = dev_ent->viodev; + tmp = 0; + if ((viodev->cmo.entitled > viodev->cmo.allocated) && + (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) + tmp = viodev->cmo.entitled - + max_t(size_t, viodev->cmo.allocated, + VIO_CMO_MIN_ENT); + viodev->cmo.entitled -= min(tmp, delta); + delta -= min(tmp, delta); + } + } else { + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return -ENOMEM; + } + +out: + schedule_delayed_work(&vio_cmo.balance_q, 0); + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return 0; +} + +/** + * vio_cmo_balance - Balance entitlement among devices + * + * @work: work queue structure for this operation + * + * Any system entitlement above the minimum needed for devices, or + * already allocated to devices, can be distributed to the devices. + * The list of devices is iterated through to recalculate the desired + * entitlement level and to determine how much entitlement above the + * minimum entitlement is allocated to devices. + * + * Small chunks of the available entitlement are given to devices until + * their requirements are fulfilled or there is no entitlement left to give. + * Upon completion sizes of the reserve and excess pools are calculated. + * + * The system minimum entitlement level is also recalculated here. + * Entitlement will be reserved for devices even after vio_bus_remove to + * accommodate reloading the driver. The OF tree is walked to count the + * number of devices present and this will remove entitlement for devices + * that have actually left the system after having vio_bus_remove called. 
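The balancing pass described above levels entitlement upward in fixed VIO_CMO_BALANCE_CHUNK steps until every device reaches its desired value or there is nothing left to hand out. A rough standalone sketch of that leveling loop, with two imaginary devices, arbitrary numbers, a placeholder minimum entitlement, and a simplified notion of the available pool:

#include <stdio.h>
#include <stddef.h>

#define CHUNK   131072          /* mirrors VIO_CMO_BALANCE_CHUNK */
#define MIN_ENT 4096            /* placeholder, not the kernel constant */

struct dev { size_t desired, entitled; };

int main(void)
{
        struct dev devs[] = { { 300000, MIN_ENT }, { 150000, MIN_ENT } };
        size_t avail = 400000;  /* entitlement left to portion out */
        size_t level = MIN_ENT;
        int i, fulfilled;

        while (avail) {
                fulfilled = 0;
                for (i = 0; i < 2; i++) {
                        size_t chunk;

                        if (devs[i].desired <= level) {
                                fulfilled++;
                                continue;
                        }
                        /* give at most one chunk, never past desired */
                        chunk = avail < CHUNK ? avail : CHUNK;
                        if (chunk > devs[i].desired - devs[i].entitled)
                                chunk = devs[i].desired - devs[i].entitled;
                        devs[i].entitled += chunk;
                        avail -= chunk;
                        if (!avail)
                                break;
                }
                if (fulfilled == 2)
                        break;
                level += CHUNK;
        }

        for (i = 0; i < 2; i++)
                printf("dev%d entitled=%zu\n", i, devs[i].entitled);
        return 0;
}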
+ */ +static void vio_cmo_balance(struct work_struct *work) +{ + struct vio_cmo *cmo; + struct vio_dev *viodev; + struct vio_cmo_dev_entry *dev_ent; + unsigned long flags; + size_t avail = 0, level, chunk, need; + int devcount = 0, fulfilled; + + cmo = container_of(work, struct vio_cmo, balance_q.work); + + spin_lock_irqsave(&vio_cmo.lock, flags); + + /* Calculate minimum entitlement and fulfill spare */ + cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; + BUG_ON(cmo->min > cmo->entitled); + cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); + cmo->min += cmo->spare; + cmo->desired = cmo->min; + + /* + * Determine how much entitlement is available and reset device + * entitlements + */ + avail = cmo->entitled - cmo->spare; + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { + viodev = dev_ent->viodev; + devcount++; + viodev->cmo.entitled = VIO_CMO_MIN_ENT; + cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); + avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); + } + + /* + * Having provided each device with the minimum entitlement, loop + * over the devices portioning out the remaining entitlement + * until there is nothing left. + */ + level = VIO_CMO_MIN_ENT; + while (avail) { + fulfilled = 0; + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { + viodev = dev_ent->viodev; + + if (viodev->cmo.desired <= level) { + fulfilled++; + continue; + } + + /* + * Give the device up to VIO_CMO_BALANCE_CHUNK + * bytes of entitlement, but do not exceed the + * desired level of entitlement for the device. + */ + chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK); + chunk = min(chunk, (viodev->cmo.desired - + viodev->cmo.entitled)); + viodev->cmo.entitled += chunk; + + /* + * If the memory for this entitlement increase was + * already allocated to the device it does not come + * from the available pool being portioned out. 
+ */ + need = max(viodev->cmo.allocated, viodev->cmo.entitled)- + max(viodev->cmo.allocated, level); + avail -= need; + + } + if (fulfilled == devcount) + break; + level += VIO_CMO_BALANCE_CHUNK; + } + + /* Calculate new reserve and excess pool sizes */ + cmo->reserve.size = cmo->min; + cmo->excess.free = 0; + cmo->excess.size = 0; + need = 0; + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { + viodev = dev_ent->viodev; + /* Calculated reserve size above the minimum entitlement */ + if (viodev->cmo.entitled) + cmo->reserve.size += (viodev->cmo.entitled - + VIO_CMO_MIN_ENT); + /* Calculated used excess entitlement */ + if (viodev->cmo.allocated > viodev->cmo.entitled) + need += viodev->cmo.allocated - viodev->cmo.entitled; + } + cmo->excess.size = cmo->entitled - cmo->reserve.size; + cmo->excess.free = cmo->excess.size - need; + + cancel_delayed_work(to_delayed_work(work)); + spin_unlock_irqrestore(&vio_cmo.lock, flags); +} + +static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + void *ret; + + if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { + atomic_inc(&viodev->cmo.allocs_failed); + return NULL; + } + + ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); + if (unlikely(ret == NULL)) { + vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); + atomic_inc(&viodev->cmo.allocs_failed); + } + + return ret; +} + +static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + + dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); + + vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); +} + +static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct iommu_table *tbl; + dma_addr_t ret = DMA_ERROR_CODE; + + tbl = get_iommu_table_base(dev); + if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { + atomic_inc(&viodev->cmo.allocs_failed); + return ret; + } + + ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); + if (unlikely(dma_mapping_error(dev, ret))) { + vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); + atomic_inc(&viodev->cmo.allocs_failed); + } + + return ret; +} + +static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct iommu_table *tbl; + + tbl = get_iommu_table_base(dev); + dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); + + vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); +} + +static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct iommu_table *tbl; + struct scatterlist *sgl; + int ret, count; + size_t alloc_size = 0; + + tbl = get_iommu_table_base(dev); + for_each_sg(sglist, sgl, nelems, count) + alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); + + if (vio_cmo_alloc(viodev, alloc_size)) { + atomic_inc(&viodev->cmo.allocs_failed); + return 0; + } + + ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs); + + if (unlikely(!ret)) { + 
vio_cmo_dealloc(viodev, alloc_size); + atomic_inc(&viodev->cmo.allocs_failed); + return ret; + } + + for_each_sg(sglist, sgl, ret, count) + alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); + if (alloc_size) + vio_cmo_dealloc(viodev, alloc_size); + + return ret; +} + +static void vio_dma_iommu_unmap_sg(struct device *dev, + struct scatterlist *sglist, int nelems, + enum dma_data_direction direction, + unsigned long attrs) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct iommu_table *tbl; + struct scatterlist *sgl; + size_t alloc_size = 0; + int count; + + tbl = get_iommu_table_base(dev); + for_each_sg(sglist, sgl, nelems, count) + alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); + + dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); + + vio_cmo_dealloc(viodev, alloc_size); +} + +static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) +{ + return dma_iommu_ops.dma_supported(dev, mask); +} + +static u64 vio_dma_get_required_mask(struct device *dev) +{ + return dma_iommu_ops.get_required_mask(dev); +} + +static struct dma_map_ops vio_dma_mapping_ops = { + .alloc = vio_dma_iommu_alloc_coherent, + .free = vio_dma_iommu_free_coherent, + .mmap = dma_direct_mmap_coherent, + .map_sg = vio_dma_iommu_map_sg, + .unmap_sg = vio_dma_iommu_unmap_sg, + .map_page = vio_dma_iommu_map_page, + .unmap_page = vio_dma_iommu_unmap_page, + .dma_supported = vio_dma_iommu_dma_supported, + .get_required_mask = vio_dma_get_required_mask, +}; + +/** + * vio_cmo_set_dev_desired - Set desired entitlement for a device + * + * @viodev: struct vio_dev for device to alter + * @desired: new desired entitlement level in bytes + * + * For use by devices to request a change to their entitlement at runtime or + * through sysfs. The desired entitlement level is changed and a balancing + * of system resources is scheduled to run in the future. + */ +void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) +{ + unsigned long flags; + struct vio_cmo_dev_entry *dev_ent; + int found = 0; + + if (!firmware_has_feature(FW_FEATURE_CMO)) + return; + + spin_lock_irqsave(&vio_cmo.lock, flags); + if (desired < VIO_CMO_MIN_ENT) + desired = VIO_CMO_MIN_ENT; + + /* + * Changes will not be made for devices not in the device list. + * If it is not in the device list, then no driver is loaded + * for the device and it can not receive entitlement. + */ + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) + if (viodev == dev_ent->viodev) { + found = 1; + break; + } + if (!found) { + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return; + } + + /* Increase/decrease in desired device entitlement */ + if (desired >= viodev->cmo.desired) { + /* Just bump the bus and device values prior to a balance*/ + vio_cmo.desired += desired - viodev->cmo.desired; + viodev->cmo.desired = desired; + } else { + /* Decrease bus and device values for desired entitlement */ + vio_cmo.desired -= viodev->cmo.desired - desired; + viodev->cmo.desired = desired; + /* + * If less entitlement is desired than current entitlement, move + * any reserve memory in the change region to the excess pool. + */ + if (viodev->cmo.entitled > desired) { + vio_cmo.reserve.size -= viodev->cmo.entitled - desired; + vio_cmo.excess.size += viodev->cmo.entitled - desired; + /* + * If entitlement moving from the reserve pool to the + * excess pool is currently unused, add to the excess + * free counter. 
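Each of the vio_dma_iommu_* hooks above follows the same shape: charge the device's entitlement for the rounded-up size, delegate to the corresponding dma_iommu_ops callback, and refund the charge (while counting an allocs_failed) if the underlying mapping fails. Reduced to a generic userspace sketch, with invented charge/refund/backend helpers standing in for the kernel calls:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE    4096

static size_t charged;

static bool charge(size_t bytes)  { charged += bytes; return true; }
static void refund(size_t bytes)  { charged -= bytes; }

/* Stand-in for the real IOMMU mapping call. */
static bool backend_map(size_t bytes) { return bytes <= 1 << 20; }

static size_t roundup_page(size_t n)
{
        return (n + PAGE - 1) & ~(size_t)(PAGE - 1);
}

/* The wrapper pattern: charge first, undo the charge on backend failure. */
static bool wrapped_map(size_t bytes)
{
        size_t need = roundup_page(bytes);

        if (!charge(need))
                return false;   /* kernel: count an allocs_failed */
        if (!backend_map(bytes)) {
                refund(need);   /* kernel: vio_cmo_dealloc + allocs_failed */
                return false;
        }
        return true;
}

int main(void)
{
        printf("map 5000 bytes: %d, charged=%zu\n", wrapped_map(5000), charged);
        return 0;
}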
+ */ + if (viodev->cmo.allocated < viodev->cmo.entitled) + vio_cmo.excess.free += viodev->cmo.entitled - + max(viodev->cmo.allocated, desired); + viodev->cmo.entitled = desired; + } + } + schedule_delayed_work(&vio_cmo.balance_q, 0); + spin_unlock_irqrestore(&vio_cmo.lock, flags); +} + +/** + * vio_cmo_bus_probe - Handle CMO specific bus probe activities + * + * @viodev - Pointer to struct vio_dev for device + * + * Determine the devices IO memory entitlement needs, attempting + * to satisfy the system minimum entitlement at first and scheduling + * a balance operation to take care of the rest at a later time. + * + * Returns: 0 on success, -EINVAL when device doesn't support CMO, and + * -ENOMEM when entitlement is not available for device or + * device entry. + * + */ +static int vio_cmo_bus_probe(struct vio_dev *viodev) +{ + struct vio_cmo_dev_entry *dev_ent; + struct device *dev = &viodev->dev; + struct iommu_table *tbl; + struct vio_driver *viodrv = to_vio_driver(dev->driver); + unsigned long flags; + size_t size; + bool dma_capable = false; + + tbl = get_iommu_table_base(dev); + + /* A device requires entitlement if it has a DMA window property */ + switch (viodev->family) { + case VDEVICE: + if (of_get_property(viodev->dev.of_node, + "ibm,my-dma-window", NULL)) + dma_capable = true; + break; + case PFO: + dma_capable = false; + break; + default: + dev_warn(dev, "unknown device family: %d\n", viodev->family); + BUG(); + break; + } + + /* Configure entitlement for the device. */ + if (dma_capable) { + /* Check that the driver is CMO enabled and get desired DMA */ + if (!viodrv->get_desired_dma) { + dev_err(dev, "%s: device driver does not support CMO\n", + __func__); + return -EINVAL; + } + + viodev->cmo.desired = + IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); + if (viodev->cmo.desired < VIO_CMO_MIN_ENT) + viodev->cmo.desired = VIO_CMO_MIN_ENT; + size = VIO_CMO_MIN_ENT; + + dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry), + GFP_KERNEL); + if (!dev_ent) + return -ENOMEM; + + dev_ent->viodev = viodev; + spin_lock_irqsave(&vio_cmo.lock, flags); + list_add(&dev_ent->list, &vio_cmo.device_list); + } else { + viodev->cmo.desired = 0; + size = 0; + spin_lock_irqsave(&vio_cmo.lock, flags); + } + + /* + * If the needs for vio_cmo.min have not changed since they + * were last set, the number of devices in the OF tree has + * been constant and the IO memory for this is already in + * the reserve pool. + */ + if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * + VIO_CMO_MIN_ENT)) { + /* Updated desired entitlement if device requires it */ + if (size) + vio_cmo.desired += (viodev->cmo.desired - + VIO_CMO_MIN_ENT); + } else { + size_t tmp; + + tmp = vio_cmo.spare + vio_cmo.excess.free; + if (tmp < size) { + dev_err(dev, "%s: insufficient free " + "entitlement to add device. 
" + "Need %lu, have %lu\n", __func__, + size, (vio_cmo.spare + tmp)); + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return -ENOMEM; + } + + /* Use excess pool first to fulfill request */ + tmp = min(size, vio_cmo.excess.free); + vio_cmo.excess.free -= tmp; + vio_cmo.excess.size -= tmp; + vio_cmo.reserve.size += tmp; + + /* Use spare if excess pool was insufficient */ + vio_cmo.spare -= size - tmp; + + /* Update bus accounting */ + vio_cmo.min += size; + vio_cmo.desired += viodev->cmo.desired; + } + spin_unlock_irqrestore(&vio_cmo.lock, flags); + return 0; +} + +/** + * vio_cmo_bus_remove - Handle CMO specific bus removal activities + * + * @viodev - Pointer to struct vio_dev for device + * + * Remove the device from the cmo device list. The minimum entitlement + * will be reserved for the device as long as it is in the system. The + * rest of the entitlement the device had been allocated will be returned + * to the system. + */ +static void vio_cmo_bus_remove(struct vio_dev *viodev) +{ + struct vio_cmo_dev_entry *dev_ent; + unsigned long flags; + size_t tmp; + + spin_lock_irqsave(&vio_cmo.lock, flags); + if (viodev->cmo.allocated) { + dev_err(&viodev->dev, "%s: device had %lu bytes of IO " + "allocated after remove operation.\n", + __func__, viodev->cmo.allocated); + BUG(); + } + + /* + * Remove the device from the device list being maintained for + * CMO enabled devices. + */ + list_for_each_entry(dev_ent, &vio_cmo.device_list, list) + if (viodev == dev_ent->viodev) { + list_del(&dev_ent->list); + kfree(dev_ent); + break; + } + + /* + * Devices may not require any entitlement and they do not need + * to be processed. Otherwise, return the device's entitlement + * back to the pools. + */ + if (viodev->cmo.entitled) { + /* + * This device has not yet left the OF tree, it's + * minimum entitlement remains in vio_cmo.min and + * vio_cmo.desired + */ + vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); + + /* + * Save min allocation for device in reserve as long + * as it exists in OF tree as determined by later + * balance operation + */ + viodev->cmo.entitled -= VIO_CMO_MIN_ENT; + + /* Replenish spare from freed reserve pool */ + if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { + tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - + vio_cmo.spare)); + vio_cmo.spare += tmp; + viodev->cmo.entitled -= tmp; + } + + /* Remaining reserve goes to excess pool */ + vio_cmo.excess.size += viodev->cmo.entitled; + vio_cmo.excess.free += viodev->cmo.entitled; + vio_cmo.reserve.size -= viodev->cmo.entitled; + + /* + * Until the device is removed it will keep a + * minimum entitlement; this will guarantee that + * a module unload/load will result in a success. + */ + viodev->cmo.entitled = VIO_CMO_MIN_ENT; + viodev->cmo.desired = VIO_CMO_MIN_ENT; + atomic_set(&viodev->cmo.allocs_failed, 0); + } + + spin_unlock_irqrestore(&vio_cmo.lock, flags); +} + +static void vio_cmo_set_dma_ops(struct vio_dev *viodev) +{ + set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); +} + +/** + * vio_cmo_bus_init - CMO entitlement initialization at bus init time + * + * Set up the reserve and excess entitlement pools based on available + * system entitlement and the number of devices in the OF tree that + * require entitlement in the reserve pool. 
+ */ +static void vio_cmo_bus_init(void) +{ + struct hvcall_mpp_data mpp_data; + int err; + + memset(&vio_cmo, 0, sizeof(struct vio_cmo)); + spin_lock_init(&vio_cmo.lock); + INIT_LIST_HEAD(&vio_cmo.device_list); + INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance); + + /* Get current system entitlement */ + err = h_get_mpp(&mpp_data); + + /* + * On failure, continue with entitlement set to 0, will panic() + * later when spare is reserved. + */ + if (err != H_SUCCESS) { + printk(KERN_ERR "%s: unable to determine system IO "\ + "entitlement. (%d)\n", __func__, err); + vio_cmo.entitled = 0; + } else { + vio_cmo.entitled = mpp_data.entitled_mem; + } + + /* Set reservation and check against entitlement */ + vio_cmo.spare = VIO_CMO_MIN_ENT; + vio_cmo.reserve.size = vio_cmo.spare; + vio_cmo.reserve.size += (vio_cmo_num_OF_devs() * + VIO_CMO_MIN_ENT); + if (vio_cmo.reserve.size > vio_cmo.entitled) { + printk(KERN_ERR "%s: insufficient system entitlement\n", + __func__); + panic("%s: Insufficient system entitlement", __func__); + } + + /* Set the remaining accounting variables */ + vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size; + vio_cmo.excess.free = vio_cmo.excess.size; + vio_cmo.min = vio_cmo.reserve.size; + vio_cmo.desired = vio_cmo.reserve.size; +} + +/* sysfs device functions and data structures for CMO */ + +#define viodev_cmo_rd_attr(name) \ +static ssize_t viodev_cmo_##name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \ +} + +static ssize_t viodev_cmo_allocs_failed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vio_dev *viodev = to_vio_dev(dev); + return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); +} + +static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct vio_dev *viodev = to_vio_dev(dev); + atomic_set(&viodev->cmo.allocs_failed, 0); + return count; +} + +static ssize_t viodev_cmo_desired_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct vio_dev *viodev = to_vio_dev(dev); + size_t new_desired; + int ret; + + ret = kstrtoul(buf, 10, &new_desired); + if (ret) + return ret; + + vio_cmo_set_dev_desired(viodev, new_desired); + return count; +} + +viodev_cmo_rd_attr(desired); +viodev_cmo_rd_attr(entitled); +viodev_cmo_rd_attr(allocated); + +static ssize_t name_show(struct device *, struct device_attribute *, char *); +static ssize_t devspec_show(struct device *, struct device_attribute *, char *); +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf); +static struct device_attribute vio_cmo_dev_attrs[] = { + __ATTR_RO(name), + __ATTR_RO(devspec), + __ATTR_RO(modalias), + __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, + viodev_cmo_desired_show, viodev_cmo_desired_set), + __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), + __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL), + __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, + viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset), + __ATTR_NULL +}; + +/* sysfs bus functions and data structures for CMO */ + +#define viobus_cmo_rd_attr(name) \ +static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \ +{ \ + return sprintf(buf, "%lu\n", vio_cmo.name); \ +} \ +static BUS_ATTR_RO(cmo_##name) + +#define viobus_cmo_pool_rd_attr(name, 
var) \ +static ssize_t \ +cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \ +{ \ + return sprintf(buf, "%lu\n", vio_cmo.name.var); \ +} \ +static BUS_ATTR_RO(cmo_##name##_##var) + +viobus_cmo_rd_attr(entitled); +viobus_cmo_rd_attr(spare); +viobus_cmo_rd_attr(min); +viobus_cmo_rd_attr(desired); +viobus_cmo_rd_attr(curr); +viobus_cmo_pool_rd_attr(reserve, size); +viobus_cmo_pool_rd_attr(excess, size); +viobus_cmo_pool_rd_attr(excess, free); + +static ssize_t cmo_high_show(struct bus_type *bt, char *buf) +{ + return sprintf(buf, "%lu\n", vio_cmo.high); +} + +static ssize_t cmo_high_store(struct bus_type *bt, const char *buf, + size_t count) +{ + unsigned long flags; + + spin_lock_irqsave(&vio_cmo.lock, flags); + vio_cmo.high = vio_cmo.curr; + spin_unlock_irqrestore(&vio_cmo.lock, flags); + + return count; +} +static BUS_ATTR_RW(cmo_high); + +static struct attribute *vio_bus_attrs[] = { + &bus_attr_cmo_entitled.attr, + &bus_attr_cmo_spare.attr, + &bus_attr_cmo_min.attr, + &bus_attr_cmo_desired.attr, + &bus_attr_cmo_curr.attr, + &bus_attr_cmo_high.attr, + &bus_attr_cmo_reserve_size.attr, + &bus_attr_cmo_excess_size.attr, + &bus_attr_cmo_excess_free.attr, + NULL, +}; +ATTRIBUTE_GROUPS(vio_bus); + +static void vio_cmo_sysfs_init(void) +{ + vio_bus_type.dev_attrs = vio_cmo_dev_attrs; + vio_bus_type.bus_groups = vio_bus_groups; +} +#else /* CONFIG_PPC_SMLPAR */ +int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } +void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} +static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } +static void vio_cmo_bus_remove(struct vio_dev *viodev) {} +static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} +static void vio_cmo_bus_init(void) {} +static void vio_cmo_sysfs_init(void) { } +#endif /* CONFIG_PPC_SMLPAR */ +EXPORT_SYMBOL(vio_cmo_entitlement_update); +EXPORT_SYMBOL(vio_cmo_set_dev_desired); + + +/* + * Platform Facilities Option (PFO) support + */ + +/** + * vio_h_cop_sync - Perform a synchronous PFO co-processor operation + * + * @vdev - Pointer to a struct vio_dev for device + * @op - Pointer to a struct vio_pfo_op for the operation parameters + * + * Calls the hypervisor to synchronously perform the PFO operation + * described in @op. In the case of a busy response from the hypervisor, + * the operation will be re-submitted indefinitely unless a non-zero timeout + * is specified or an error occurs. The timeout places a limit on when to + * stop re-submitting a operation, the total time can be exceeded if an + * operation is in progress. + * + * If op->hcall_ret is not NULL, this will be set to the return from the + * last h_cop_op call or it will be 0 if an error not involving the h_call + * was encountered. 
+ * + * Returns: + * 0 on success, + * -EINVAL if the h_call fails due to an invalid parameter, + * -E2BIG if the h_call can not be performed synchronously, + * -EBUSY if a timeout is specified and has elapsed, + * -EACCES if the memory area for data/status has been rescinded, or + * -EPERM if a hardware fault has been indicated + */ +int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op) +{ + struct device *dev = &vdev->dev; + unsigned long deadline = 0; + long hret = 0; + int ret = 0; + + if (op->timeout) + deadline = jiffies + msecs_to_jiffies(op->timeout); + + while (true) { + hret = plpar_hcall_norets(H_COP, op->flags, + vdev->resource_id, + op->in, op->inlen, op->out, + op->outlen, op->csbcpb); + + if (hret == H_SUCCESS || + (hret != H_NOT_ENOUGH_RESOURCES && + hret != H_BUSY && hret != H_RESOURCE) || + (op->timeout && time_after(deadline, jiffies))) + break; + + dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret); + } + + switch (hret) { + case H_SUCCESS: + ret = 0; + break; + case H_OP_MODE: + case H_TOO_BIG: + ret = -E2BIG; + break; + case H_RESCINDED: + ret = -EACCES; + break; + case H_HARDWARE: + ret = -EPERM; + break; + case H_NOT_ENOUGH_RESOURCES: + case H_RESOURCE: + case H_BUSY: + ret = -EBUSY; + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n", + __func__, ret, hret); + + op->hcall_err = hret; + return ret; +} +EXPORT_SYMBOL(vio_h_cop_sync); + +static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) +{ + const __be32 *dma_window; + struct iommu_table *tbl; + unsigned long offset, size; + + dma_window = of_get_property(dev->dev.of_node, + "ibm,my-dma-window", NULL); + if (!dma_window) + return NULL; + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (tbl == NULL) + return NULL; + + of_parse_dma_window(dev->dev.of_node, dma_window, + &tbl->it_index, &offset, &size); + + /* TCE table size - measured in tce entries */ + tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; + tbl->it_size = size >> tbl->it_page_shift; + /* offset for VIO should always be 0 */ + tbl->it_offset = offset >> tbl->it_page_shift; + tbl->it_busno = 0; + tbl->it_type = TCE_VB; + tbl->it_blocksize = 16; + + if (firmware_has_feature(FW_FEATURE_LPAR)) + tbl->it_ops = &iommu_table_lpar_multi_ops; + else + tbl->it_ops = &iommu_table_pseries_ops; + + return iommu_init_table(tbl, -1); +} + +/** + * vio_match_device: - Tell if a VIO device has a matching + * VIO device id structure. + * @ids: array of VIO device id structures to search in + * @dev: the VIO device structure to match against + * + * Used by a driver to check whether a VIO device present in the + * system is in its list of supported devices. Returns the matching + * vio_device_id structure or NULL if there is no match. + */ +static const struct vio_device_id *vio_match_device( + const struct vio_device_id *ids, const struct vio_dev *dev) +{ + while (ids->type[0] != '\0') { + if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && + of_device_is_compatible(dev->dev.of_node, + ids->compat)) + return ids; + ids++; + } + return NULL; +} + +/* + * Convert from struct device to struct vio_dev and pass to driver. + * dev->driver has already been set by generic code because vio_bus_match + * succeeded. 
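Driver matching, as vio_match_device() above shows, is a prefix comparison on the device_type string combined with an OF compatible check, walking the driver's id table until an entry with an empty type terminates it. A userspace approximation of that table walk, with of_device_is_compatible() reduced to a plain strcmp and example id entries that are not taken from any real driver:

#include <stdio.h>
#include <string.h>

struct vio_id { const char *type, *compat; };

static const struct vio_id ids[] = {
        { "vscsi", "IBM,v-scsi" },      /* example entries only */
        { "vnet",  "IBM,l-lan"  },
        { "",      ""           },      /* empty type terminates the table */
};

static const struct vio_id *match(const char *type, const char *compat)
{
        const struct vio_id *id;

        for (id = ids; id->type[0] != '\0'; id++)
                if (!strncmp(type, id->type, strlen(id->type)) &&
                    !strcmp(compat, id->compat))
                        return id;
        return NULL;
}

int main(void)
{
        printf("match: %s\n", match("vscsi", "IBM,v-scsi") ? "yes" : "no");
        return 0;
}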
+ */ +static int vio_bus_probe(struct device *dev) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct vio_driver *viodrv = to_vio_driver(dev->driver); + const struct vio_device_id *id; + int error = -ENODEV; + + if (!viodrv->probe) + return error; + + id = vio_match_device(viodrv->id_table, viodev); + if (id) { + memset(&viodev->cmo, 0, sizeof(viodev->cmo)); + if (firmware_has_feature(FW_FEATURE_CMO)) { + error = vio_cmo_bus_probe(viodev); + if (error) + return error; + } + error = viodrv->probe(viodev, id); + if (error && firmware_has_feature(FW_FEATURE_CMO)) + vio_cmo_bus_remove(viodev); + } + + return error; +} + +/* convert from struct device to struct vio_dev and pass to driver. */ +static int vio_bus_remove(struct device *dev) +{ + struct vio_dev *viodev = to_vio_dev(dev); + struct vio_driver *viodrv = to_vio_driver(dev->driver); + struct device *devptr; + int ret = 1; + + /* + * Hold a reference to the device after the remove function is called + * to allow for CMO accounting cleanup for the device. + */ + devptr = get_device(dev); + + if (viodrv->remove) + ret = viodrv->remove(viodev); + + if (!ret && firmware_has_feature(FW_FEATURE_CMO)) + vio_cmo_bus_remove(viodev); + + put_device(devptr); + return ret; +} + +/** + * vio_register_driver: - Register a new vio driver + * @viodrv: The vio_driver structure to be registered. + */ +int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, + const char *mod_name) +{ + pr_debug("%s: driver %s registering\n", __func__, viodrv->name); + + /* fill in 'struct driver' fields */ + viodrv->driver.name = viodrv->name; + viodrv->driver.pm = viodrv->pm; + viodrv->driver.bus = &vio_bus_type; + viodrv->driver.owner = owner; + viodrv->driver.mod_name = mod_name; + + return driver_register(&viodrv->driver); +} +EXPORT_SYMBOL(__vio_register_driver); + +/** + * vio_unregister_driver - Remove registration of vio driver. + * @viodrv: The vio_driver struct to be removed form registration + */ +void vio_unregister_driver(struct vio_driver *viodrv) +{ + driver_unregister(&viodrv->driver); +} +EXPORT_SYMBOL(vio_unregister_driver); + +/* vio_dev refcount hit 0 */ +static void vio_dev_release(struct device *dev) +{ + struct iommu_table *tbl = get_iommu_table_base(dev); + + if (tbl) + iommu_free_table(tbl, of_node_full_name(dev->of_node)); + of_node_put(dev->of_node); + kfree(to_vio_dev(dev)); +} + +/** + * vio_register_device_node: - Register a new vio device. + * @of_node: The OF node for this device. + * + * Creates and initializes a vio_dev structure from the data in + * of_node and adds it to the list of virtual devices. + * Returns a pointer to the created vio_dev or NULL if node has + * NULL device_type or compatible fields. + */ +struct vio_dev *vio_register_device_node(struct device_node *of_node) +{ + struct vio_dev *viodev; + struct device_node *parent_node; + const __be32 *prop; + enum vio_dev_family family; + const char *of_node_name = of_node->name ? of_node->name : ""; + + /* + * Determine if this node is a under the /vdevice node or under the + * /ibm,platform-facilities node. This decides the device's family. 
+ */ + parent_node = of_get_parent(of_node); + if (parent_node) { + if (!strcmp(parent_node->full_name, "/ibm,platform-facilities")) + family = PFO; + else if (!strcmp(parent_node->full_name, "/vdevice")) + family = VDEVICE; + else { + pr_warn("%s: parent(%s) of %s not recognized.\n", + __func__, + parent_node->full_name, + of_node_name); + of_node_put(parent_node); + return NULL; + } + of_node_put(parent_node); + } else { + pr_warn("%s: could not determine the parent of node %s.\n", + __func__, of_node_name); + return NULL; + } + + if (family == PFO) { + if (of_get_property(of_node, "interrupt-controller", NULL)) { + pr_debug("%s: Skipping the interrupt controller %s.\n", + __func__, of_node_name); + return NULL; + } + } + + /* allocate a vio_dev for this node */ + viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); + if (viodev == NULL) { + pr_warn("%s: allocation failure for VIO device.\n", __func__); + return NULL; + } + + /* we need the 'device_type' property, in order to match with drivers */ + viodev->family = family; + if (viodev->family == VDEVICE) { + unsigned int unit_address; + + if (of_node->type != NULL) + viodev->type = of_node->type; + else { + pr_warn("%s: node %s is missing the 'device_type' " + "property.\n", __func__, of_node_name); + goto out; + } + + prop = of_get_property(of_node, "reg", NULL); + if (prop == NULL) { + pr_warn("%s: node %s missing 'reg'\n", + __func__, of_node_name); + goto out; + } + unit_address = of_read_number(prop, 1); + dev_set_name(&viodev->dev, "%x", unit_address); + viodev->irq = irq_of_parse_and_map(of_node, 0); + viodev->unit_address = unit_address; + } else { + /* PFO devices need their resource_id for submitting COP_OPs + * This is an optional field for devices, but is required when + * performing synchronous ops */ + prop = of_get_property(of_node, "ibm,resource-id", NULL); + if (prop != NULL) + viodev->resource_id = of_read_number(prop, 1); + + dev_set_name(&viodev->dev, "%s", of_node_name); + viodev->type = of_node_name; + viodev->irq = 0; + } + + viodev->name = of_node->name; + viodev->dev.of_node = of_node_get(of_node); + + set_dev_node(&viodev->dev, of_node_to_nid(of_node)); + + /* init generic 'struct device' fields: */ + viodev->dev.parent = &vio_bus_device.dev; + viodev->dev.bus = &vio_bus_type; + viodev->dev.release = vio_dev_release; + + if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { + if (firmware_has_feature(FW_FEATURE_CMO)) + vio_cmo_set_dma_ops(viodev); + else + set_dma_ops(&viodev->dev, &dma_iommu_ops); + + set_iommu_table_base(&viodev->dev, + vio_build_iommu_table(viodev)); + + /* needed to ensure proper operation of coherent allocations + * later, in case driver doesn't set it explicitly */ + viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); + viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; + } + + /* register with generic device framework */ + if (device_register(&viodev->dev)) { + printk(KERN_ERR "%s: failed to register device %s\n", + __func__, dev_name(&viodev->dev)); + put_device(&viodev->dev); + return NULL; + } + + return viodev; + +out: /* Use this exit point for any return prior to device_register */ + kfree(viodev); + + return NULL; +} +EXPORT_SYMBOL(vio_register_device_node); + +/* + * vio_bus_scan_for_devices - Scan OF and register each child device + * @root_name - OF node name for the root of the subtree to search. + * This must be non-NULL + * + * Starting from the root node provide, register the device node for + * each child beneath the root. 
+ */ +static void vio_bus_scan_register_devices(char *root_name) +{ + struct device_node *node_root, *node_child; + + if (!root_name) + return; + + node_root = of_find_node_by_name(NULL, root_name); + if (node_root) { + + /* + * Create struct vio_devices for each virtual device in + * the device tree. Drivers will associate with them later. + */ + node_child = of_get_next_child(node_root, NULL); + while (node_child) { + vio_register_device_node(node_child); + node_child = of_get_next_child(node_root, node_child); + } + of_node_put(node_root); + } +} + +/** + * vio_bus_init: - Initialize the virtual IO bus + */ +static int __init vio_bus_init(void) +{ + int err; + + if (firmware_has_feature(FW_FEATURE_CMO)) + vio_cmo_sysfs_init(); + + err = bus_register(&vio_bus_type); + if (err) { + printk(KERN_ERR "failed to register VIO bus\n"); + return err; + } + + /* + * The fake parent of all vio devices, just to give us + * a nice directory + */ + err = device_register(&vio_bus_device.dev); + if (err) { + printk(KERN_WARNING "%s: device_register returned %i\n", + __func__, err); + return err; + } + + if (firmware_has_feature(FW_FEATURE_CMO)) + vio_cmo_bus_init(); + + return 0; +} +postcore_initcall(vio_bus_init); + +static int __init vio_device_init(void) +{ + vio_bus_scan_register_devices("vdevice"); + vio_bus_scan_register_devices("ibm,platform-facilities"); + + return 0; +} +device_initcall(vio_device_init); + +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", to_vio_dev(dev)->name); +} + +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_node *of_node = dev->of_node; + + return sprintf(buf, "%s\n", of_node_full_name(of_node)); +} + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct vio_dev *vio_dev = to_vio_dev(dev); + struct device_node *dn; + const char *cp; + + dn = dev->of_node; + if (!dn) { + strcpy(buf, "\n"); + return strlen(buf); + } + cp = of_get_property(dn, "compatible", NULL); + if (!cp) { + strcpy(buf, "\n"); + return strlen(buf); + } + + return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); +} + +static struct device_attribute vio_dev_attrs[] = { + __ATTR_RO(name), + __ATTR_RO(devspec), + __ATTR_RO(modalias), + __ATTR_NULL +}; + +void vio_unregister_device(struct vio_dev *viodev) +{ + device_unregister(&viodev->dev); +} +EXPORT_SYMBOL(vio_unregister_device); + +static int vio_bus_match(struct device *dev, struct device_driver *drv) +{ + const struct vio_dev *vio_dev = to_vio_dev(dev); + struct vio_driver *vio_drv = to_vio_driver(drv); + const struct vio_device_id *ids = vio_drv->id_table; + + return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); +} + +static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) +{ + const struct vio_dev *vio_dev = to_vio_dev(dev); + struct device_node *dn; + const char *cp; + + dn = dev->of_node; + if (!dn) + return -ENODEV; + cp = of_get_property(dn, "compatible", NULL); + if (!cp) + return -ENODEV; + + add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); + return 0; +} + +struct bus_type vio_bus_type = { + .name = "vio", + .dev_attrs = vio_dev_attrs, + .uevent = vio_hotplug, + .match = vio_bus_match, + .probe = vio_bus_probe, + .remove = vio_bus_remove, +}; + +/** + * vio_get_attribute: - get attribute for virtual device + * @vdev: The vio device to get property. + * @which: The property/attribute to be extracted. 
+ * @length: Pointer to length of returned data size (unused if NULL). + * + * Calls prom.c's of_get_property() to return the value of the + * attribute specified by @which +*/ +const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) +{ + return of_get_property(vdev->dev.of_node, which, length); +} +EXPORT_SYMBOL(vio_get_attribute); + +#ifdef CONFIG_PPC_PSERIES +/* vio_find_name() - internal because only vio.c knows how we formatted the + * kobject name + */ +static struct vio_dev *vio_find_name(const char *name) +{ + struct device *found; + + found = bus_find_device_by_name(&vio_bus_type, NULL, name); + if (!found) + return NULL; + + return to_vio_dev(found); +} + +/** + * vio_find_node - find an already-registered vio_dev + * @vnode: device_node of the virtual device we're looking for + * + * Takes a reference to the embedded struct device which needs to be dropped + * after use. + */ +struct vio_dev *vio_find_node(struct device_node *vnode) +{ + char kobj_name[20]; + struct device_node *vnode_parent; + const char *dev_type; + + vnode_parent = of_get_parent(vnode); + if (!vnode_parent) + return NULL; + + dev_type = of_get_property(vnode_parent, "device_type", NULL); + of_node_put(vnode_parent); + if (!dev_type) + return NULL; + + /* construct the kobject name from the device node */ + if (!strcmp(dev_type, "vdevice")) { + const __be32 *prop; + + prop = of_get_property(vnode, "reg", NULL); + if (!prop) + return NULL; + snprintf(kobj_name, sizeof(kobj_name), "%x", + (uint32_t)of_read_number(prop, 1)); + } else if (!strcmp(dev_type, "ibm,platform-facilities")) + snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); + else + return NULL; + + return vio_find_name(kobj_name); +} +EXPORT_SYMBOL(vio_find_node); + +int vio_enable_interrupts(struct vio_dev *dev) +{ + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); + if (rc != H_SUCCESS) + printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); + return rc; +} +EXPORT_SYMBOL(vio_enable_interrupts); + +int vio_disable_interrupts(struct vio_dev *dev) +{ + int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); + if (rc != H_SUCCESS) + printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); + return rc; +} +EXPORT_SYMBOL(vio_disable_interrupts); +#endif /* CONFIG_PPC_PSERIES */ -- cgit From 30757de2030d1729437419293996b51919ddc63a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 15 Nov 2016 14:47:43 +1100 Subject: powerpc/pseries: Move ibmebus.c into platforms pseries ibmebus.c is pseries only code, so move it in there. 
Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/Makefile | 1 - arch/powerpc/kernel/ibmebus.c | 767 ------------------------------- arch/powerpc/platforms/Kconfig | 6 - arch/powerpc/platforms/pseries/Kconfig | 6 + arch/powerpc/platforms/pseries/Makefile | 1 + arch/powerpc/platforms/pseries/ibmebus.c | 767 +++++++++++++++++++++++++++++++ 6 files changed, 774 insertions(+), 774 deletions(-) delete mode 100644 arch/powerpc/kernel/ibmebus.c create mode 100644 arch/powerpc/platforms/pseries/ibmebus.c diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 9c57ebf61e4d..26b5a5e02e69 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -58,7 +58,6 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y) obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o obj-$(CONFIG_RTAS_PROC) += rtas-proc.o -obj-$(CONFIG_IBMEBUS) += ibmebus.o obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c deleted file mode 100644 index 35f5244782d9..000000000000 --- a/arch/powerpc/kernel/ibmebus.c +++ /dev/null @@ -1,767 +0,0 @@ -/* - * IBM PowerPC IBM eBus Infrastructure Support. - * - * Copyright (c) 2005 IBM Corporation - * Joachim Fenkes - * Heiko J Schick - * - * All rights reserved. - * - * This source code is distributed under a dual license of GPL v2.0 and OpenIB - * BSD. - * - * OpenIB BSD License - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials - * provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER - * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct device ibmebus_bus_device = { /* fake "parent" device */ - .init_name = "ibmebus", -}; - -struct bus_type ibmebus_bus_type; - -/* These devices will automatically be added to the bus during init */ -static const struct of_device_id ibmebus_matches[] __initconst = { - { .compatible = "IBM,lhca" }, - { .compatible = "IBM,lhea" }, - {}, -}; - -static void *ibmebus_alloc_coherent(struct device *dev, - size_t size, - dma_addr_t *dma_handle, - gfp_t flag, - unsigned long attrs) -{ - void *mem; - - mem = kmalloc(size, flag); - *dma_handle = (dma_addr_t)mem; - - return mem; -} - -static void ibmebus_free_coherent(struct device *dev, - size_t size, void *vaddr, - dma_addr_t dma_handle, - unsigned long attrs) -{ - kfree(vaddr); -} - -static dma_addr_t ibmebus_map_page(struct device *dev, - struct page *page, - unsigned long offset, - size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - return (dma_addr_t)(page_address(page) + offset); -} - -static void ibmebus_unmap_page(struct device *dev, - dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - return; -} - -static int ibmebus_map_sg(struct device *dev, - struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) { - sg->dma_address = (dma_addr_t) sg_virt(sg); - sg->dma_length = sg->length; - } - - return nents; -} - -static void ibmebus_unmap_sg(struct device *dev, - struct scatterlist *sg, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - return; -} - -static int ibmebus_dma_supported(struct device *dev, u64 mask) -{ - return mask == DMA_BIT_MASK(64); -} - -static u64 ibmebus_dma_get_required_mask(struct device *dev) -{ - return DMA_BIT_MASK(64); -} - -static struct dma_map_ops ibmebus_dma_ops = { - .alloc = ibmebus_alloc_coherent, - .free = ibmebus_free_coherent, - .map_sg = ibmebus_map_sg, - .unmap_sg = ibmebus_unmap_sg, - .dma_supported = ibmebus_dma_supported, - .get_required_mask = ibmebus_dma_get_required_mask, - .map_page = ibmebus_map_page, - .unmap_page = ibmebus_unmap_page, -}; - -static int ibmebus_match_path(struct device *dev, void *data) -{ - struct device_node *dn = to_platform_device(dev)->dev.of_node; - return (dn->full_name && - (strcasecmp((char *)data, dn->full_name) == 0)); -} - -static int ibmebus_match_node(struct device *dev, void *data) -{ - return to_platform_device(dev)->dev.of_node == data; -} - -static int ibmebus_create_device(struct device_node *dn) -{ - struct platform_device *dev; - int ret; - - dev = of_device_alloc(dn, NULL, &ibmebus_bus_device); - if (!dev) - return -ENOMEM; - - dev->dev.bus = &ibmebus_bus_type; - dev->dev.archdata.dma_ops = &ibmebus_dma_ops; - - ret = of_device_add(dev); - if (ret) - platform_device_put(dev); - return ret; -} - -static int ibmebus_create_devices(const struct of_device_id *matches) -{ - struct device_node *root, *child; - struct device *dev; - int ret = 0; - - root = of_find_node_by_path("/"); - - for_each_child_of_node(root, child) { - if (!of_match_node(matches, child)) - continue; - - dev = bus_find_device(&ibmebus_bus_type, NULL, child, - ibmebus_match_node); - if (dev) { - put_device(dev); - continue; - } - - ret = ibmebus_create_device(child); - if (ret) { - printk(KERN_ERR "%s: failed to create device (%i)", - __func__, ret); - 
of_node_put(child); - break; - } - } - - of_node_put(root); - return ret; -} - -int ibmebus_register_driver(struct platform_driver *drv) -{ - /* If the driver uses devices that ibmebus doesn't know, add them */ - ibmebus_create_devices(drv->driver.of_match_table); - - drv->driver.bus = &ibmebus_bus_type; - return driver_register(&drv->driver); -} -EXPORT_SYMBOL(ibmebus_register_driver); - -void ibmebus_unregister_driver(struct platform_driver *drv) -{ - driver_unregister(&drv->driver); -} -EXPORT_SYMBOL(ibmebus_unregister_driver); - -int ibmebus_request_irq(u32 ist, irq_handler_t handler, - unsigned long irq_flags, const char *devname, - void *dev_id) -{ - unsigned int irq = irq_create_mapping(NULL, ist); - - if (!irq) - return -EINVAL; - - return request_irq(irq, handler, irq_flags, devname, dev_id); -} -EXPORT_SYMBOL(ibmebus_request_irq); - -void ibmebus_free_irq(u32 ist, void *dev_id) -{ - unsigned int irq = irq_find_mapping(NULL, ist); - - free_irq(irq, dev_id); - irq_dispose_mapping(irq); -} -EXPORT_SYMBOL(ibmebus_free_irq); - -static char *ibmebus_chomp(const char *in, size_t count) -{ - char *out = kmalloc(count + 1, GFP_KERNEL); - - if (!out) - return NULL; - - memcpy(out, in, count); - out[count] = '\0'; - if (out[count - 1] == '\n') - out[count - 1] = '\0'; - - return out; -} - -static ssize_t ibmebus_store_probe(struct bus_type *bus, - const char *buf, size_t count) -{ - struct device_node *dn = NULL; - struct device *dev; - char *path; - ssize_t rc = 0; - - path = ibmebus_chomp(buf, count); - if (!path) - return -ENOMEM; - - dev = bus_find_device(&ibmebus_bus_type, NULL, path, - ibmebus_match_path); - if (dev) { - put_device(dev); - printk(KERN_WARNING "%s: %s has already been probed\n", - __func__, path); - rc = -EEXIST; - goto out; - } - - if ((dn = of_find_node_by_path(path))) { - rc = ibmebus_create_device(dn); - of_node_put(dn); - } else { - printk(KERN_WARNING "%s: no such device node: %s\n", - __func__, path); - rc = -ENODEV; - } - -out: - kfree(path); - if (rc) - return rc; - return count; -} -static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe); - -static ssize_t ibmebus_store_remove(struct bus_type *bus, - const char *buf, size_t count) -{ - struct device *dev; - char *path; - - path = ibmebus_chomp(buf, count); - if (!path) - return -ENOMEM; - - if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, - ibmebus_match_path))) { - of_device_unregister(to_platform_device(dev)); - put_device(dev); - - kfree(path); - return count; - } else { - printk(KERN_WARNING "%s: %s not on the bus\n", - __func__, path); - - kfree(path); - return -ENODEV; - } -} -static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove); - -static struct attribute *ibmbus_bus_attrs[] = { - &bus_attr_probe.attr, - &bus_attr_remove.attr, - NULL, -}; -ATTRIBUTE_GROUPS(ibmbus_bus); - -static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) -{ - const struct of_device_id *matches = drv->of_match_table; - - if (!matches) - return 0; - - return of_match_device(matches, dev) != NULL; -} - -static int ibmebus_bus_device_probe(struct device *dev) -{ - int error = -ENODEV; - struct platform_driver *drv; - struct platform_device *of_dev; - - drv = to_platform_driver(dev->driver); - of_dev = to_platform_device(dev); - - if (!drv->probe) - return error; - - of_dev_get(of_dev); - - if (of_driver_match_device(dev, dev->driver)) - error = drv->probe(of_dev); - if (error) - of_dev_put(of_dev); - - return error; -} - -static int ibmebus_bus_device_remove(struct device *dev) -{ - struct 
platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - - if (dev->driver && drv->remove) - drv->remove(of_dev); - return 0; -} - -static void ibmebus_bus_device_shutdown(struct device *dev) -{ - struct platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - - if (dev->driver && drv->shutdown) - drv->shutdown(of_dev); -} - -/* - * ibmebus_bus_device_attrs - */ -static ssize_t devspec_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *ofdev; - - ofdev = to_platform_device(dev); - return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); -} - -static ssize_t name_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *ofdev; - - ofdev = to_platform_device(dev); - return sprintf(buf, "%s\n", ofdev->dev.of_node->name); -} - -static ssize_t modalias_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); - buf[len] = '\n'; - buf[len+1] = 0; - return len+1; -} - -static struct device_attribute ibmebus_bus_device_attrs[] = { - __ATTR_RO(devspec), - __ATTR_RO(name), - __ATTR_RO(modalias), - __ATTR_NULL -}; - -#ifdef CONFIG_PM_SLEEP -static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) -{ - struct platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->suspend) - ret = drv->suspend(of_dev, mesg); - return ret; -} - -static int ibmebus_bus_legacy_resume(struct device *dev) -{ - struct platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->resume) - ret = drv->resume(of_dev); - return ret; -} - -static int ibmebus_bus_pm_prepare(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (drv && drv->pm && drv->pm->prepare) - ret = drv->pm->prepare(dev); - - return ret; -} - -static void ibmebus_bus_pm_complete(struct device *dev) -{ - struct device_driver *drv = dev->driver; - - if (drv && drv->pm && drv->pm->complete) - drv->pm->complete(dev); -} - -#ifdef CONFIG_SUSPEND - -static int ibmebus_bus_pm_suspend(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend) - ret = drv->pm->suspend(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); - } - - return ret; -} - -static int ibmebus_bus_pm_suspend_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend_noirq) - ret = drv->pm->suspend_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_resume(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume) - ret = drv->pm->resume(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_resume_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume_noirq) - ret = drv->pm->resume_noirq(dev); - } - - return ret; -} - -#else /* !CONFIG_SUSPEND */ - -#define ibmebus_bus_pm_suspend NULL -#define 
ibmebus_bus_pm_resume NULL -#define ibmebus_bus_pm_suspend_noirq NULL -#define ibmebus_bus_pm_resume_noirq NULL - -#endif /* !CONFIG_SUSPEND */ - -#ifdef CONFIG_HIBERNATE_CALLBACKS - -static int ibmebus_bus_pm_freeze(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze) - ret = drv->pm->freeze(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); - } - - return ret; -} - -static int ibmebus_bus_pm_freeze_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze_noirq) - ret = drv->pm->freeze_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_thaw(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw) - ret = drv->pm->thaw(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_thaw_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw_noirq) - ret = drv->pm->thaw_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_poweroff(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff) - ret = drv->pm->poweroff(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); - } - - return ret; -} - -static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff_noirq) - ret = drv->pm->poweroff_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_restore(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore) - ret = drv->pm->restore(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_restore_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore_noirq) - ret = drv->pm->restore_noirq(dev); - } - - return ret; -} - -#else /* !CONFIG_HIBERNATE_CALLBACKS */ - -#define ibmebus_bus_pm_freeze NULL -#define ibmebus_bus_pm_thaw NULL -#define ibmebus_bus_pm_poweroff NULL -#define ibmebus_bus_pm_restore NULL -#define ibmebus_bus_pm_freeze_noirq NULL -#define ibmebus_bus_pm_thaw_noirq NULL -#define ibmebus_bus_pm_poweroff_noirq NULL -#define ibmebus_bus_pm_restore_noirq NULL - -#endif /* !CONFIG_HIBERNATE_CALLBACKS */ - -static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { - .prepare = ibmebus_bus_pm_prepare, - .complete = ibmebus_bus_pm_complete, - .suspend = ibmebus_bus_pm_suspend, - .resume = ibmebus_bus_pm_resume, - .freeze = ibmebus_bus_pm_freeze, - .thaw = ibmebus_bus_pm_thaw, - .poweroff = ibmebus_bus_pm_poweroff, - .restore = ibmebus_bus_pm_restore, - .suspend_noirq = ibmebus_bus_pm_suspend_noirq, - .resume_noirq = ibmebus_bus_pm_resume_noirq, - .freeze_noirq = ibmebus_bus_pm_freeze_noirq, - .thaw_noirq = ibmebus_bus_pm_thaw_noirq, - .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, - .restore_noirq = ibmebus_bus_pm_restore_noirq, -}; - -#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) - -#else /* !CONFIG_PM_SLEEP */ - -#define 
IBMEBUS_BUS_PM_OPS_PTR NULL - -#endif /* !CONFIG_PM_SLEEP */ - -struct bus_type ibmebus_bus_type = { - .name = "ibmebus", - .uevent = of_device_uevent_modalias, - .bus_groups = ibmbus_bus_groups, - .match = ibmebus_bus_bus_match, - .probe = ibmebus_bus_device_probe, - .remove = ibmebus_bus_device_remove, - .shutdown = ibmebus_bus_device_shutdown, - .dev_attrs = ibmebus_bus_device_attrs, - .pm = IBMEBUS_BUS_PM_OPS_PTR, -}; -EXPORT_SYMBOL(ibmebus_bus_type); - -static int __init ibmebus_bus_init(void) -{ - int err; - - printk(KERN_INFO "IBM eBus Device Driver\n"); - - err = bus_register(&ibmebus_bus_type); - if (err) { - printk(KERN_ERR "%s: failed to register IBM eBus.\n", - __func__); - return err; - } - - err = device_register(&ibmebus_bus_device); - if (err) { - printk(KERN_WARNING "%s: device_register returned %i\n", - __func__, err); - bus_unregister(&ibmebus_bus_type); - - return err; - } - - err = ibmebus_create_devices(ibmebus_matches); - if (err) { - device_unregister(&ibmebus_bus_device); - bus_unregister(&ibmebus_bus_type); - return err; - } - - return 0; -} -postcore_initcall(ibmebus_bus_init); diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index eae86c35e4c6..7e3a2ebba29b 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -168,12 +168,6 @@ config MPIC_BROKEN_REGREAD well, but enabling it uses about 8KB of memory to keep copies of the register contents in software. -config IBMEBUS - depends on PPC_PSERIES - bool "Support for GX bus based adapters" - help - Bus device driver for GX bus based adapters. - config EEH bool depends on (PPC_POWERNV || PPC_PSERIES) && PCI diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index f7d78b81951d..fbf2e4477f88 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -132,3 +132,9 @@ config IBMVIO depends on PPC_PSERIES bool default y + +config IBMEBUS + depends on PPC_PSERIES + bool "Support for GX bus based adapters" + help + Bus device driver for GX bus based adapters. diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 85ba00233fb0..942fe116a8ba 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_DTL) += dtl.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o +obj-$(CONFIG_IBMEBUS) += ibmebus.o ifeq ($(CONFIG_PPC_PSERIES),y) obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c new file mode 100644 index 000000000000..35f5244782d9 --- /dev/null +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -0,0 +1,767 @@ +/* + * IBM PowerPC IBM eBus Infrastructure Support. + * + * Copyright (c) 2005 IBM Corporation + * Joachim Fenkes + * Heiko J Schick + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct device ibmebus_bus_device = { /* fake "parent" device */ + .init_name = "ibmebus", +}; + +struct bus_type ibmebus_bus_type; + +/* These devices will automatically be added to the bus during init */ +static const struct of_device_id ibmebus_matches[] __initconst = { + { .compatible = "IBM,lhca" }, + { .compatible = "IBM,lhea" }, + {}, +}; + +static void *ibmebus_alloc_coherent(struct device *dev, + size_t size, + dma_addr_t *dma_handle, + gfp_t flag, + unsigned long attrs) +{ + void *mem; + + mem = kmalloc(size, flag); + *dma_handle = (dma_addr_t)mem; + + return mem; +} + +static void ibmebus_free_coherent(struct device *dev, + size_t size, void *vaddr, + dma_addr_t dma_handle, + unsigned long attrs) +{ + kfree(vaddr); +} + +static dma_addr_t ibmebus_map_page(struct device *dev, + struct page *page, + unsigned long offset, + size_t size, + enum dma_data_direction direction, + unsigned long attrs) +{ + return (dma_addr_t)(page_address(page) + offset); +} + +static void ibmebus_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction direction, + unsigned long attrs) +{ + return; +} + +static int ibmebus_map_sg(struct device *dev, + struct scatterlist *sgl, + int nents, enum dma_data_direction direction, + unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = (dma_addr_t) sg_virt(sg); + sg->dma_length = sg->length; + } + + return nents; +} + +static void ibmebus_unmap_sg(struct device *dev, + struct scatterlist *sg, + int nents, enum dma_data_direction direction, + unsigned long attrs) +{ + return; +} + +static int ibmebus_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static u64 ibmebus_dma_get_required_mask(struct device *dev) +{ + return DMA_BIT_MASK(64); +} + +static struct dma_map_ops ibmebus_dma_ops = { + .alloc = ibmebus_alloc_coherent, + .free = ibmebus_free_coherent, + .map_sg = ibmebus_map_sg, + .unmap_sg = ibmebus_unmap_sg, + .dma_supported = ibmebus_dma_supported, + .get_required_mask = ibmebus_dma_get_required_mask, + .map_page = ibmebus_map_page, + .unmap_page = ibmebus_unmap_page, +}; + +static int ibmebus_match_path(struct device *dev, void *data) +{ + struct device_node *dn = to_platform_device(dev)->dev.of_node; + return (dn->full_name && + (strcasecmp((char *)data, dn->full_name) == 0)); +} + 
+static int ibmebus_match_node(struct device *dev, void *data) +{ + return to_platform_device(dev)->dev.of_node == data; +} + +static int ibmebus_create_device(struct device_node *dn) +{ + struct platform_device *dev; + int ret; + + dev = of_device_alloc(dn, NULL, &ibmebus_bus_device); + if (!dev) + return -ENOMEM; + + dev->dev.bus = &ibmebus_bus_type; + dev->dev.archdata.dma_ops = &ibmebus_dma_ops; + + ret = of_device_add(dev); + if (ret) + platform_device_put(dev); + return ret; +} + +static int ibmebus_create_devices(const struct of_device_id *matches) +{ + struct device_node *root, *child; + struct device *dev; + int ret = 0; + + root = of_find_node_by_path("/"); + + for_each_child_of_node(root, child) { + if (!of_match_node(matches, child)) + continue; + + dev = bus_find_device(&ibmebus_bus_type, NULL, child, + ibmebus_match_node); + if (dev) { + put_device(dev); + continue; + } + + ret = ibmebus_create_device(child); + if (ret) { + printk(KERN_ERR "%s: failed to create device (%i)", + __func__, ret); + of_node_put(child); + break; + } + } + + of_node_put(root); + return ret; +} + +int ibmebus_register_driver(struct platform_driver *drv) +{ + /* If the driver uses devices that ibmebus doesn't know, add them */ + ibmebus_create_devices(drv->driver.of_match_table); + + drv->driver.bus = &ibmebus_bus_type; + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(ibmebus_register_driver); + +void ibmebus_unregister_driver(struct platform_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(ibmebus_unregister_driver); + +int ibmebus_request_irq(u32 ist, irq_handler_t handler, + unsigned long irq_flags, const char *devname, + void *dev_id) +{ + unsigned int irq = irq_create_mapping(NULL, ist); + + if (!irq) + return -EINVAL; + + return request_irq(irq, handler, irq_flags, devname, dev_id); +} +EXPORT_SYMBOL(ibmebus_request_irq); + +void ibmebus_free_irq(u32 ist, void *dev_id) +{ + unsigned int irq = irq_find_mapping(NULL, ist); + + free_irq(irq, dev_id); + irq_dispose_mapping(irq); +} +EXPORT_SYMBOL(ibmebus_free_irq); + +static char *ibmebus_chomp(const char *in, size_t count) +{ + char *out = kmalloc(count + 1, GFP_KERNEL); + + if (!out) + return NULL; + + memcpy(out, in, count); + out[count] = '\0'; + if (out[count - 1] == '\n') + out[count - 1] = '\0'; + + return out; +} + +static ssize_t ibmebus_store_probe(struct bus_type *bus, + const char *buf, size_t count) +{ + struct device_node *dn = NULL; + struct device *dev; + char *path; + ssize_t rc = 0; + + path = ibmebus_chomp(buf, count); + if (!path) + return -ENOMEM; + + dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path); + if (dev) { + put_device(dev); + printk(KERN_WARNING "%s: %s has already been probed\n", + __func__, path); + rc = -EEXIST; + goto out; + } + + if ((dn = of_find_node_by_path(path))) { + rc = ibmebus_create_device(dn); + of_node_put(dn); + } else { + printk(KERN_WARNING "%s: no such device node: %s\n", + __func__, path); + rc = -ENODEV; + } + +out: + kfree(path); + if (rc) + return rc; + return count; +} +static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe); + +static ssize_t ibmebus_store_remove(struct bus_type *bus, + const char *buf, size_t count) +{ + struct device *dev; + char *path; + + path = ibmebus_chomp(buf, count); + if (!path) + return -ENOMEM; + + if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path))) { + of_device_unregister(to_platform_device(dev)); + put_device(dev); + + kfree(path); + return count; + } else { + 
printk(KERN_WARNING "%s: %s not on the bus\n", + __func__, path); + + kfree(path); + return -ENODEV; + } +} +static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove); + +static struct attribute *ibmbus_bus_attrs[] = { + &bus_attr_probe.attr, + &bus_attr_remove.attr, + NULL, +}; +ATTRIBUTE_GROUPS(ibmbus_bus); + +static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) +{ + const struct of_device_id *matches = drv->of_match_table; + + if (!matches) + return 0; + + return of_match_device(matches, dev) != NULL; +} + +static int ibmebus_bus_device_probe(struct device *dev) +{ + int error = -ENODEV; + struct platform_driver *drv; + struct platform_device *of_dev; + + drv = to_platform_driver(dev->driver); + of_dev = to_platform_device(dev); + + if (!drv->probe) + return error; + + of_dev_get(of_dev); + + if (of_driver_match_device(dev, dev->driver)) + error = drv->probe(of_dev); + if (error) + of_dev_put(of_dev); + + return error; +} + +static int ibmebus_bus_device_remove(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct platform_driver *drv = to_platform_driver(dev->driver); + + if (dev->driver && drv->remove) + drv->remove(of_dev); + return 0; +} + +static void ibmebus_bus_device_shutdown(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct platform_driver *drv = to_platform_driver(dev->driver); + + if (dev->driver && drv->shutdown) + drv->shutdown(of_dev); +} + +/* + * ibmebus_bus_device_attrs + */ +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *ofdev; + + ofdev = to_platform_device(dev); + return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); +} + +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *ofdev; + + ofdev = to_platform_device(dev); + return sprintf(buf, "%s\n", ofdev->dev.of_node->name); +} + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); + buf[len] = '\n'; + buf[len+1] = 0; + return len+1; +} + +static struct device_attribute ibmebus_bus_device_attrs[] = { + __ATTR_RO(devspec), + __ATTR_RO(name), + __ATTR_RO(modalias), + __ATTR_NULL +}; + +#ifdef CONFIG_PM_SLEEP +static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct platform_driver *drv = to_platform_driver(dev->driver); + int ret = 0; + + if (dev->driver && drv->suspend) + ret = drv->suspend(of_dev, mesg); + return ret; +} + +static int ibmebus_bus_legacy_resume(struct device *dev) +{ + struct platform_device *of_dev = to_platform_device(dev); + struct platform_driver *drv = to_platform_driver(dev->driver); + int ret = 0; + + if (dev->driver && drv->resume) + ret = drv->resume(of_dev); + return ret; +} + +static int ibmebus_bus_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +static void ibmebus_bus_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int ibmebus_bus_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + 
if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int ibmebus_bus_pm_suspend_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend_noirq) + ret = drv->pm->suspend_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_resume_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume_noirq) + ret = drv->pm->resume_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_SUSPEND */ + +#define ibmebus_bus_pm_suspend NULL +#define ibmebus_bus_pm_resume NULL +#define ibmebus_bus_pm_suspend_noirq NULL +#define ibmebus_bus_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATE_CALLBACKS + +static int ibmebus_bus_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +static int ibmebus_bus_pm_freeze_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze_noirq) + ret = drv->pm->freeze_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_thaw_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw_noirq) + ret = drv->pm->thaw_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff_noirq) + ret = drv->pm->poweroff_noirq(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore) + ret = drv->pm->restore(dev); + } else { + ret = ibmebus_bus_legacy_resume(dev); + } + + return ret; +} + +static int ibmebus_bus_pm_restore_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore_noirq) + ret = drv->pm->restore_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_HIBERNATE_CALLBACKS */ + +#define ibmebus_bus_pm_freeze NULL +#define ibmebus_bus_pm_thaw NULL +#define 
ibmebus_bus_pm_poweroff NULL +#define ibmebus_bus_pm_restore NULL +#define ibmebus_bus_pm_freeze_noirq NULL +#define ibmebus_bus_pm_thaw_noirq NULL +#define ibmebus_bus_pm_poweroff_noirq NULL +#define ibmebus_bus_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ + +static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { + .prepare = ibmebus_bus_pm_prepare, + .complete = ibmebus_bus_pm_complete, + .suspend = ibmebus_bus_pm_suspend, + .resume = ibmebus_bus_pm_resume, + .freeze = ibmebus_bus_pm_freeze, + .thaw = ibmebus_bus_pm_thaw, + .poweroff = ibmebus_bus_pm_poweroff, + .restore = ibmebus_bus_pm_restore, + .suspend_noirq = ibmebus_bus_pm_suspend_noirq, + .resume_noirq = ibmebus_bus_pm_resume_noirq, + .freeze_noirq = ibmebus_bus_pm_freeze_noirq, + .thaw_noirq = ibmebus_bus_pm_thaw_noirq, + .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, + .restore_noirq = ibmebus_bus_pm_restore_noirq, +}; + +#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) + +#else /* !CONFIG_PM_SLEEP */ + +#define IBMEBUS_BUS_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + +struct bus_type ibmebus_bus_type = { + .name = "ibmebus", + .uevent = of_device_uevent_modalias, + .bus_groups = ibmbus_bus_groups, + .match = ibmebus_bus_bus_match, + .probe = ibmebus_bus_device_probe, + .remove = ibmebus_bus_device_remove, + .shutdown = ibmebus_bus_device_shutdown, + .dev_attrs = ibmebus_bus_device_attrs, + .pm = IBMEBUS_BUS_PM_OPS_PTR, +}; +EXPORT_SYMBOL(ibmebus_bus_type); + +static int __init ibmebus_bus_init(void) +{ + int err; + + printk(KERN_INFO "IBM eBus Device Driver\n"); + + err = bus_register(&ibmebus_bus_type); + if (err) { + printk(KERN_ERR "%s: failed to register IBM eBus.\n", + __func__); + return err; + } + + err = device_register(&ibmebus_bus_device); + if (err) { + printk(KERN_WARNING "%s: device_register returned %i\n", + __func__, err); + bus_unregister(&ibmebus_bus_type); + + return err; + } + + err = ibmebus_create_devices(ibmebus_matches); + if (err) { + device_unregister(&ibmebus_bus_device); + bus_unregister(&ibmebus_bus_type); + return err; + } + + return 0; +} +postcore_initcall(ibmebus_bus_init); -- cgit From 90ee8762e2b2d5c6e9fa6463f9db8bcb5d9df405 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 15 Nov 2016 14:47:44 +1100 Subject: powerpc/pseries: Disable IBMEBUS on little endian builds The IBMEBUS code supports the GX bus found on Power7 and earlier CPUs. On Power8 it has been replaced, and so we have no need for it. We don't actually have a config symbol for Power8 vs Power7 etc., but we only support booting little endian on Power8 or later, so use that as a reasonable approximation. Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index fbf2e4477f88..e1c280a95d58 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -134,7 +134,7 @@ config IBMVIO default y config IBMEBUS - depends on PPC_PSERIES + depends on PPC_PSERIES && !CPU_LITTLE_ENDIAN bool "Support for GX bus based adapters" help Bus device driver for GX bus based adapters. 
-- cgit From 3baad97067ba20653c0c31eb307c5c7d9480fcb0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 2 Nov 2016 21:06:17 +1100 Subject: powerpc/configs: Turn on PPC crypto implementations in the server defconfigs These are the PPC optimised versions of various crypto algorithms, so we should turn them on by default to get test coverage. Suggested-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/configs/powernv_defconfig | 3 +++ arch/powerpc/configs/ppc64_defconfig | 3 +++ arch/powerpc/configs/pseries_defconfig | 3 +++ 3 files changed, 9 insertions(+) diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index d77af0eca967..4bdc966687cc 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -296,6 +296,8 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPT_CRC32C_VPMSUM=m +CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -304,6 +306,7 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index df5f1616375f..a11a68c7236e 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -332,6 +332,8 @@ CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPT_CRC32C_VPMSUM=m +CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -340,6 +342,7 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 536e81110936..e8cc508d53c6 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -295,6 +295,8 @@ CONFIG_XMON=y CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPT_CRC32C_VPMSUM=m +CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m @@ -303,6 +305,7 @@ CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -- cgit From e9eb0278dad9c7a2631d5432180a130710110c09 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 28 Oct 2016 17:39:53 +1100 Subject: powerpc/64: Used named initialisers for ibm_pa_features The ibm_pa_features array consists of structures that describe which bit and byte in the ibm,pa-features property toggles one or more flags in either the CPU, MMU, or user visible feature flags. Each one consists of 7 values, which are all unsigned long, int or char, meaning the compiler gives us no warning if we assign the wrong values to the wrong elements. In fact we have had a bug here in the past, where we were setting incorrect bits, see commit 6997e57d693b ("powerpc: scan_features() updates incorrect bits for REAL_LE"). So switch to using named initialisers for the structure elements, to reduce the likelihood of future bugs, and hopefully improve readability also. 
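The pitfall this commit describes is easy to reproduce outside the kernel. The sketch below is a standalone illustration only: the struct is a simplified stand-in, not the kernel's actual ibm_pa_feature definition, but it shows how positional initialisers can silently put values in the wrong fields while designated (named) initialisers tie each value to a field by name.

#include <stdio.h>

/* Simplified stand-in: several integer fields of similar types, so the
 * compiler cannot tell a misordered positional initialiser from a
 * correct one. */
struct pa_feature {
	unsigned long cpu_features;	/* CPU feature bits */
	unsigned long mmu_features;	/* MMU feature bits */
	unsigned char pabyte;		/* byte number in the property */
	unsigned char pabit;		/* bit number (big-endian) */
	unsigned char invert;		/* if 1, bit set => clear feature */
};

/* The author intends pabyte=5, pabit=0, cpu_features=0x4, but positionally
 * the values land in cpu_features, mmu_features and pabyte instead, and
 * the compiler stays silent. */
static struct pa_feature positional = { 5, 0, 0x4, 0, 0 };

/* Designated initialisers make the intent explicit; omitted fields are
 * simply zero. */
static struct pa_feature designated = {
	.pabyte		= 5,
	.pabit		= 0,
	.cpu_features	= 0x4,
};

int main(void)
{
	printf("positional: pabyte=%d cpu_features=%#lx\n",
	       positional.pabyte, positional.cpu_features);
	printf("designated: pabyte=%d cpu_features=%#lx\n",
	       designated.pabyte, designated.cpu_features);
	return 0;
}

The first line prints pabyte=4 and cpu_features=0x5, exactly the kind of silent mix-up that led to the REAL_LE bug mentioned above, while the second prints the intended pabyte=5 and cpu_features=0x4.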
Signed-off-by: Michael Ellerman Reviewed-by: Balbir Singh --- arch/powerpc/kernel/prom.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index b0245bed6f54..a7b87b6b4ef4 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -156,21 +156,22 @@ static struct ibm_pa_feature { unsigned char pabit; /* bit number (big-endian) */ unsigned char invert; /* if 1, pa bit set => clear feature */ } ibm_pa_features[] __initdata = { - {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0}, - {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0}, - {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0}, - {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0}, - {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1}, - {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0}, - {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0}, + { .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU }, + { .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU }, + { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL }, + { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE }, + { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE }, + { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX }, + { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN }, + { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE, + .cpu_user_ftrs = PPC_FEATURE_TRUE_LE }, /* * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n), * we don't want to turn on TM here, so we use the *_COMP versions * which are 0 if the kernel doesn't support TM. */ - {CPU_FTR_TM_COMP, 0, 0, - PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0}, - {0, MMU_FTR_TYPE_RADIX, 0, 0, 40, 0, 0}, + { .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP, + .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP }, }; static void __init scan_features(unsigned long node, const unsigned char *ftrs, -- cgit From d4791db527bf397c84c9956c3ece9692ed5322ac Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Wed, 16 Nov 2016 12:12:26 +1100 Subject: powerpc/powernv: Don't warn on PE init if unfreeze is unsupported Whenever a PE is initialised in powernv, opal_pci_eeh_freeze_clear() is called. This is to remove any existing freeze, and has no negative side effects if the PE is already in an unfrozen state. On PHB backends that don't support this operation and return OPAL_UNSUPPORTED, this creates a scary and misleading warning message. Skip the warning message on init if OPAL_UNSUPPORTED is returned. As far as I'm aware, this currently only affects NPUs. 
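The fix itself is visible in the hunk just below; as a rough sketch of the policy it implements, the standalone program here classifies return codes the same way, warning only when the unfreeze genuinely failed rather than when the PHB backend simply does not implement the call. The constants are local stand-ins for illustration; the authoritative values come from the kernel's OPAL headers.

#include <stdio.h>

/* Local stand-ins for the firmware return codes discussed above; the
 * real definitions live in the kernel's OPAL header, not here. */
enum {
	MY_OPAL_SUCCESS		= 0,
	MY_OPAL_HARDWARE	= -6,
	MY_OPAL_UNSUPPORTED	= -7,
};

/* Warn only on a genuine failure: success and "call not implemented by
 * this PHB backend" (e.g. NPUs) are both fine at PE init time. */
static int should_warn(long rc)
{
	return rc != MY_OPAL_SUCCESS && rc != MY_OPAL_UNSUPPORTED;
}

int main(void)
{
	const long rcs[] = { MY_OPAL_SUCCESS, MY_OPAL_UNSUPPORTED, MY_OPAL_HARDWARE };
	unsigned int i;

	for (i = 0; i < sizeof(rcs) / sizeof(rcs[0]); i++)
		printf("rc=%ld -> %s\n", rcs[i],
		       should_warn(rcs[i]) ? "warn" : "stay silent");
	return 0;
}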
Fixes: 313483d ("powerpc/powernv: Unfreeze PE on allocation")
Signed-off-by: Russell Currey
Acked-by: Gavin Shan
Reviewed-by: Andrew Donnellan
Signed-off-by: Michael Ellerman
---
arch/powerpc/platforms/powernv/pci-ioda.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index d4b33dd2d9e7..dcdfee0cd4f2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -145,7 +145,7 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 */
 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- if (rc != OPAL_SUCCESS)
+ if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
 pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
 __func__, rc, phb->hose->global_number, pe_no);
-- cgit
From 1f52f1761456c919814c995710153a8f878353df Mon Sep 17 00:00:00 2001
From: Russell Currey
Date: Wed, 16 Nov 2016 14:02:15 +1100
Subject: powerpc/pci: Always print PHB and PE numbers as hexadecimal

PHB, PE (and by association MVE) numbers are printed as a mix of decimal and hexadecimal throughout the kernel. This can be misleading, so make them all hexadecimal.

Standardising on hex instead of dec because:

- PHB numbers are presented in hex in sysfs/debugfs (and lspci, etc)
- PE numbers are presented as hex in sysfs and parsed in hex in debugfs

The only place I think this could cause confusion is in the messages during boot, i.e.

    pci 000a:01 : [PE# 000] Secondary bus 1 associated with PE#0

which can be a quick way to check PE numbers. pe_level_printk() will only print two characters instead of three, so the above would be

    pci 000a:01 : [PE# 00] Secondary bus 1 associated with PE#0

which gives a hint it's in hex.
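To make the decimal/hex mismatch concrete, here is a minimal user-space sketch (the PE number is made up) showing how the same value reads under the two formats, and what the narrower "%.2x" style of pe_level_printk() prints:

#include <stdio.h>

int main(void)
{
	unsigned int pe = 0x1fe;	/* hypothetical PE number */

	/* Under mixed bases the same PE looks like two unrelated numbers. */
	printf("decimal: PE#%u\n", pe);	/* PE#510 */
	printf("hex:     PE#%x\n", pe);	/* PE#1fe */

	/* Two hex digits, as in the updated pe_level_printk() format. */
	printf("[PE# %.2x]\n", 0x1fu);	/* [PE# 1f] */

	return 0;
}

Since sysfs, debugfs and lspci already present these numbers in hex, keeping the kernel log in hex as well removes the need to convert when cross-referencing.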
Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/eeh.c | 6 ++--- arch/powerpc/kernel/eeh_driver.c | 8 +++--- arch/powerpc/kernel/eeh_event.c | 4 +-- arch/powerpc/kernel/eeh_pe.c | 4 +-- arch/powerpc/platforms/powernv/eeh-powernv.c | 4 +-- arch/powerpc/platforms/powernv/npu-dma.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 38 ++++++++++++++-------------- arch/powerpc/platforms/powernv/pci.c | 6 ++--- arch/powerpc/platforms/pseries/eeh_pseries.c | 8 +++--- 9 files changed, 40 insertions(+), 40 deletions(-) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index f25731627d7f..927d16269ad6 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -372,7 +372,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) /* Find the PHB PE */ phb_pe = eeh_phb_pe_get(pe->phb); if (!phb_pe) { - pr_warn("%s Can't find PE for PHB#%d\n", + pr_warn("%s Can't find PE for PHB#%x\n", __func__, pe->phb->global_number); return -EEXIST; } @@ -664,7 +664,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) rc = eeh_ops->set_option(pe, function); if (rc) pr_warn("%s: Unexpected state change %d on " - "PHB#%d-PE#%x, err=%d\n", + "PHB#%x-PE#%x, err=%d\n", __func__, function, pe->phb->global_number, pe->addr, rc); @@ -864,7 +864,7 @@ int eeh_reset_pe(struct eeh_pe *pe) } if (state < 0) { - pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x", + pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x", __func__, pe->phb->global_number, pe->addr); ret = -ENOTRECOVERABLE; goto out; diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index a62be72da274..ac984d2038ab 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -732,7 +732,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) frozen_bus = eeh_pe_bus_get(pe); if (!frozen_bus) { - pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", + pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); return; } @@ -876,7 +876,7 @@ excess_failures: * are due to poorly seated PCI cards. Only 10% or so are * due to actual, failed cards. 
*/ - pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n" + pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n" "last hour and has been permanently disabled.\n" "Please try reseating or replacing it.\n", pe->phb->global_number, pe->addr, @@ -884,7 +884,7 @@ excess_failures: goto perm_error; hard_fail: - pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n" + pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" "Please try reseating or replacing it\n", pe->phb->global_number, pe->addr); @@ -998,7 +998,7 @@ static void eeh_handle_special_event(void) bus = eeh_pe_bus_get(phb_pe); if (!bus) { pr_err("%s: Cannot find PCI bus for " - "PHB#%d-PE#%x\n", + "PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 82e7327e3cd0..accbf8b5fd46 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -75,11 +75,11 @@ static int eeh_event_handler(void * dummy) if (pe) { eeh_pe_state_mark(pe, EEH_PE_RECOVERING); if (pe->type & EEH_PE_PHB) - pr_info("EEH: Detected error on PHB#%d\n", + pr_info("EEH: Detected error on PHB#%x\n", pe->phb->global_number); else pr_info("EEH: Detected PCI bus error on " - "PHB#%d-PE#%x\n", + "PHB#%x-PE#%x\n", pe->phb->global_number, pe->addr); eeh_handle_event(pe); eeh_pe_state_clear(pe, EEH_PE_RECOVERING); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index de7d091c4c31..cc4b206f77e4 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -104,7 +104,7 @@ int eeh_phb_pe_create(struct pci_controller *phb) /* Put it into the list */ list_add_tail(&pe->child, &eeh_phb_pe); - pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number); + pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number); return 0; } @@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) /* Check if the PE number is valid */ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { - pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n", + pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n", __func__, edev->config_addr, edev->phb->global_number); return -EINVAL; } diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 2354ea51e871..6fb5522acd70 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -393,7 +393,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) /* Create PE */ ret = eeh_add_to_parent_pe(edev); if (ret) { - pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n", + pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n", __func__, hose->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret); return NULL; @@ -1097,7 +1097,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) bus = eeh_pe_bus_get(pe); if (!bus) { - pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", + pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); return -EIO; } diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index aec85e778028..73b155fd4481 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -263,7 +263,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe) /* Enable the bypass window */ top = roundup_pow_of_two(top); - dev_info(&npe->pdev->dev, "Enabling bypass for PE 
%d\n", + dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n", npe->pe_number); rc = opal_pci_map_pe_dma_window_real(phb->opal_id, npe->pe_number, npe->pe_number, diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index dcdfee0cd4f2..b07680cd2518 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -83,7 +83,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); #endif /* CONFIG_PCI_IOV*/ - printk("%spci %s: [PE# %.3d] %pV", + printk("%spci %s: [PE# %.2x] %pV", level, pfix, pe->pe_number, &vaf); va_end(args); @@ -146,7 +146,7 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED) - pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n", + pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe_no); return &phb->ioda.pe_array[pe_no]; @@ -155,13 +155,13 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) { if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) { - pr_warn("%s: Invalid PE %d on PHB#%x\n", + pr_warn("%s: Invalid PE %x on PHB#%x\n", __func__, pe_no, phb->hose->global_number); return; } if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) - pr_debug("%s: PE %d was reserved on PHB#%x\n", + pr_debug("%s: PE %x was reserved on PHB#%x\n", __func__, pe_no, phb->hose->global_number); pnv_ioda_init_pe(phb, pe_no); @@ -229,7 +229,7 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb) else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) r->end -= (2 * phb->ioda.m64_segsize); else - pr_warn(" Cannot strip M64 segment for reserved PE#%d\n", + pr_warn(" Cannot strip M64 segment for reserved PE#%x\n", phb->ioda.reserved_pe_idx); return 0; @@ -291,7 +291,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb) OPAL_M64_WINDOW_TYPE, index, base, 0, PNV_IODA1_M64_SEGS * segsz); if (rc != OPAL_SUCCESS) { - pr_warn(" Error %lld setting M64 PHB#%d-BAR#%d\n", + pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n", rc, phb->hose->global_number, index); goto fail; } @@ -300,7 +300,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb) OPAL_M64_WINDOW_TYPE, index, OPAL_ENABLE_M64_SPLIT); if (rc != OPAL_SUCCESS) { - pr_warn(" Error %lld enabling M64 PHB#%d-BAR#%d\n", + pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n", rc, phb->hose->global_number, index); goto fail; } @@ -316,7 +316,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb) else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) r->end -= (2 * phb->ioda.m64_segsize); else - WARN(1, "Wrong reserved PE#%d on PHB#%d\n", + WARN(1, "Wrong reserved PE#%x on PHB#%x\n", phb->ioda.reserved_pe_idx, phb->hose->global_number); return 0; @@ -414,7 +414,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) pe->pe_number / PNV_IODA1_M64_SEGS, pe->pe_number % PNV_IODA1_M64_SEGS); if (rc != OPAL_SUCCESS) - pr_warn("%s: Error %lld mapping M64 for PHB#%d-PE#%d\n", + pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe->pe_number); } @@ -941,14 +941,14 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) pe->mve_number = pe->pe_number; rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, 
pe->pe_number); if (rc != OPAL_SUCCESS) { - pe_err(pe, "OPAL error %ld setting up MVE %d\n", + pe_err(pe, "OPAL error %ld setting up MVE %x\n", rc, pe->mve_number); pe->mve_number = -1; } else { rc = opal_pci_set_mve_enable(phb->opal_id, pe->mve_number, OPAL_ENABLE_MVE); if (rc) { - pe_err(pe, "OPAL error %ld enabling MVE %d\n", + pe_err(pe, "OPAL error %ld enabling MVE %x\n", rc, pe->mve_number); pe->mve_number = -1; } @@ -1159,10 +1159,10 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) pe->rid = bus->busn_res.start << 8; if (all) - pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n", + pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n", bus->busn_res.start, bus->busn_res.end, pe->pe_number); else - pe_info(pe, "Secondary bus %d associated with PE#%d\n", + pe_info(pe, "Secondary bus %d associated with PE#%x\n", bus->busn_res.start, pe->pe_number); if (pnv_ioda_configure_pe(phb, pe)) { @@ -1213,7 +1213,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) * peer NPU. */ dev_info(&npu_pdev->dev, - "Associating to existing PE %d\n", pe_num); + "Associating to existing PE %x\n", pe_num); pci_dev_get(npu_pdev); npu_pdn = pci_get_pdn(npu_pdev); rid = npu_pdev->bus->number << 8 | npu_pdn->devfn; @@ -1539,7 +1539,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) | pci_iov_virtfn_devfn(pdev, vf_index); - pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n", + pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", hose->global_number, pdev->bus->number, PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)), PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num); @@ -2844,7 +2844,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, pnv_set_msi_irq_chip(phb, virq); pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," - " address=%x_%08x data=%x PE# %d\n", + " address=%x_%08x data=%x PE# %x\n", pci_name(dev), is_64 ? 
"64" : "32", hwirq, xive_num, msg->address_hi, msg->address_lo, data, pe->pe_number); @@ -2993,7 +2993,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { - pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n", + pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n", __func__, rc, index, pe->pe_number); break; } @@ -3017,7 +3017,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { - pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d", + pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x", __func__, rc, index, pe->pe_number); break; } @@ -3281,7 +3281,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type) pnv_pci_ioda2_setup_dma_pe(phb, pe); break; default: - pr_warn("%s: No DMA for PHB#%d (type %d)\n", + pr_warn("%s: No DMA for PHB#%x (type %d)\n", __func__, phb->hose->global_number, phb->type); } } diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index db7b8020f68e..c6d554fe585c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -234,7 +234,7 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, int i; data = (struct OpalIoP7IOCPhbErrorData *)common; - pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n", + pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n", hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) @@ -326,7 +326,7 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, int i; data = (struct OpalIoPhb3ErrorData*)common; - pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n", + pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n", hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) pr_info("brdgCtl: %08x\n", @@ -516,7 +516,7 @@ static void pnv_pci_config_check_eeh(struct pci_dn *pdn) } } - pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n", + pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n", (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); /* Clear the frozen state if applicable */ diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 1c428f06b14c..1eef46d9cf30 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -270,7 +270,7 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) eeh_add_flag(EEH_ENABLED); eeh_add_to_parent_pe(edev); - pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n", + pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n", __func__, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), pe.phb->global_number, pe.addr); @@ -371,7 +371,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) pe->config_addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), 0); if (ret) { - pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n", + pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->config_addr); return 0; } @@ -384,7 +384,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) pe->config_addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), 0); if (ret) { - pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n", + pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->config_addr); return 0; } 
@@ -653,7 +653,7 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) rtas_busy_delay(ret); } - pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", + pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n", __func__, pe->phb->global_number, pe->addr, ret); return ret; } -- cgit From 6654c9368a6ff75a36230d8eb94676da1d01f5ae Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Thu, 17 Nov 2016 16:07:47 +1100 Subject: powerpc/eeh: Refactor EEH PE reset functions eeh_pe_reset and eeh_reset_pe are two different functions in the same file which do mostly the same thing. Not only is this confusing, but potentially causes disrepancies in functionality, notably eeh_reset_pe as it does not check return values for failure. Refactor this into the following: - eeh_pe_reset(): stays as is, performs a single operation, exported - eeh_pe_reset_full(): new, full reset process that calls eeh_pe_reset() - eeh_reset_pe(): removed and replaced by eeh_pe_reset_full() - eeh_reset_pe_once(): removed Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc-pci.h | 2 +- arch/powerpc/kernel/eeh.c | 78 +++++++++++++++++--------------------- arch/powerpc/kernel/eeh_driver.c | 4 +- 3 files changed, 38 insertions(+), 46 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 0f73de069f19..726288048652 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -53,7 +53,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev); struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr); void eeh_slot_error_detail(struct eeh_pe *pe, int severity); int eeh_pci_enable(struct eeh_pe *pe, int function); -int eeh_reset_pe(struct eeh_pe *); +int eeh_pe_reset_full(struct eeh_pe *pe); void eeh_save_bars(struct eeh_dev *edev); int rtas_write_config(struct pci_dn *, int where, int size, u32 val); int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 927d16269ad6..8180bfd7ab93 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -808,76 +808,67 @@ static void *eeh_set_dev_freset(void *data, void *flag) } /** - * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second + * eeh_pe_reset_full - Complete a full reset process on the indicated PE * @pe: EEH PE * - * Assert the PCI #RST line for 1/4 second. + * This function executes a full reset procedure on a PE, including setting + * the appropriate flags, performing a fundamental or hot reset, and then + * deactivating the reset status. It is designed to be used within the EEH + * subsystem, as opposed to eeh_pe_reset which is exported to drivers and + * only performs a single operation at a time. + * + * This function will attempt to reset a PE three times before failing. */ -static void eeh_reset_pe_once(struct eeh_pe *pe) +int eeh_pe_reset_full(struct eeh_pe *pe) { + int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); + int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED); + int type = EEH_RESET_HOT; unsigned int freset = 0; + int i, state, ret; - /* Determine type of EEH reset required for - * Partitionable Endpoint, a hot-reset (1) - * or a fundamental reset (3). - * A fundamental reset required by any device under - * Partitionable Endpoint trumps hot-reset. + /* + * Determine the type of reset to perform - hot or fundamental. 
+ * Hot reset is the default operation, unless any device under the + * PE requires a fundamental reset. */ eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset); if (freset) - eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); - else - eeh_ops->reset(pe, EEH_RESET_HOT); - - eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); -} - -/** - * eeh_reset_pe - Reset the indicated PE - * @pe: EEH PE - * - * This routine should be called to reset indicated device, including - * PE. A PE might include multiple PCI devices and sometimes PCI bridges - * might be involved as well. - */ -int eeh_reset_pe(struct eeh_pe *pe) -{ - int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); - int i, state, ret; + type = EEH_RESET_FUNDAMENTAL; - /* Mark as reset and block config space */ - eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); + /* Mark the PE as in reset state and block config space accesses */ + eeh_pe_state_mark(pe, reset_state); - /* Take three shots at resetting the bus */ + /* Make three attempts at resetting the bus */ for (i = 0; i < 3; i++) { - eeh_reset_pe_once(pe); + ret = eeh_pe_reset(pe, type); + if (ret) + break; - /* - * EEH_PE_ISOLATED is expected to be removed after - * BAR restore. - */ + ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE); + if (ret) + break; + + /* Wait until the PE is in a functioning state */ state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); - if ((state & flags) == flags) { - ret = 0; - goto out; - } + if ((state & active_flags) == active_flags) + break; if (state < 0) { pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x", __func__, pe->phb->global_number, pe->addr); ret = -ENOTRECOVERABLE; - goto out; + break; } - /* We might run out of credits */ + /* Set error in case this is our last attempt */ ret = -EIO; pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n", __func__, state, pe->phb->global_number, pe->addr, (i + 1)); } -out: - eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, reset_state); return ret; } @@ -1601,6 +1592,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe) return eeh_unfreeze_pe(pe, true); } + /** * eeh_pe_reset - Issue PE reset according to specified type * @pe: EEH PE diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index ac984d2038ab..555a47bd5d1a 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -588,7 +588,7 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL); /* Issue reset */ - ret = eeh_reset_pe(pe); + ret = eeh_pe_reset_full(pe); if (ret) { eeh_pe_state_clear(pe, EEH_PE_RECOVERING); return ret; @@ -659,7 +659,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * config accesses. So we prefer to block them. However, controlled * PCI config accesses initiated from EEH itself are allowed. */ - rc = eeh_reset_pe(pe); + rc = eeh_pe_reset_full(pe); if (rc) return rc; -- cgit From 64168f4296fa507ba059307984cddcf957a81d5f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 15 Nov 2016 20:36:06 +0530 Subject: powerpc/mm/coproc: Handle bad address on coproc slb fault VSID 0 is bad address. 
Don't create slb entries on coproc fault for bad address Signed-off-by: Aneesh Kumar K.V Reviewed-by: Balbir Singh Reviewed-by: Ian Munsie Signed-off-by: Michael Ellerman --- arch/powerpc/mm/copro_fault.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 362954f98029..aaa7ec6788b9 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -134,6 +134,9 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) pr_debug("%s: invalid region access at %016llx\n", __func__, ea); return 1; } + /* Bad address */ + if (!vsid) + return 1; vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey; -- cgit From 9b894c91a95b13b51846f83ee4fa4b086a874c72 Mon Sep 17 00:00:00 2001 From: Florian Larysch Date: Thu, 17 Nov 2016 11:23:24 +0100 Subject: DT: i2c: W83793 is a trivial device Signed-off-by: Florian Larysch Acked-by: Rob Herring Signed-off-by: Scott Wood --- Documentation/devicetree/bindings/i2c/trivial-devices.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt index fbbad6446741..c65aff0edf56 100644 --- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt +++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt @@ -158,4 +158,5 @@ ti,tsc2003 I2C Touch-Screen Controller ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface ti,tmp103 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface ti,tmp275 Digital Temperature Sensor +winbond,w83793 Winbond/Nuvoton H/W Monitor winbond,wpct301 i2c trusted platform module (TPM) -- cgit From 8ba1baabde85bfc27ab486422dc8fca3092bb9df Mon Sep 17 00:00:00 2001 From: Florian Larysch Date: Thu, 17 Nov 2016 11:23:23 +0100 Subject: powerpc/dts: add device tree entry for W83793 on T4240RDB The T4240RDB contains a W83793 hardware monitoring chip. Add a device tree entry to make the driver attach to it, as the i2c-mpc bus driver dropped support for class-based instantiation of devices a long time ago. Signed-off-by: Florian Larysch Signed-off-by: Scott Wood --- arch/powerpc/boot/dts/fsl/t4240rdb.dts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts index cc0a264b8acb..8166c660712a 100644 --- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts @@ -125,6 +125,10 @@ }; i2c@118000 { + hwmon@2f { + compatible = "winbond,w83793"; + reg = <0x2f>; + }; eeprom@52 { compatible = "at24,24c256"; reg = <0x52>; -- cgit From 57907a73e0ff7ccd561d0202acaabe5537ed359e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 12 Nov 2016 17:01:30 +0000 Subject: soc/fsl: fix spelling mistakes in critical error messages Trivial fix to spelling mistake "uncommited" to "uncommitted" in critical error messages. 
Signed-off-by: Colin Ian King Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/bman.c | 2 +- drivers/soc/fsl/qbman/qman.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index ffa48fdbb1a9..48b7eb698689 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c @@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal) i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); if (i != rcr_ptr2idx(rcr->cursor)) - pr_crit("losing uncommited RCR entries\n"); + pr_crit("losing uncommitted RCR entries\n"); i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); if (i != rcr->ci) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 119054bc922b..deeacd5d123e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -443,7 +443,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal) DPAA_ASSERT(!eqcr->busy); if (pi != eqcr_ptr2idx(eqcr->cursor)) - pr_crit("losing uncommited EQCR entries\n"); + pr_crit("losing uncommitted EQCR entries\n"); if (ci != eqcr->ci) pr_crit("missing existing EQCR completions\n"); if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) -- cgit From 00b6cfc4c4ee32c6031d1645548f1bf4e6104c45 Mon Sep 17 00:00:00 2001 From: David Engraf Date: Tue, 15 Nov 2016 09:24:27 +0100 Subject: powerpc/85xx/qemu: Enable CONFIG_E500 and CONFIG_PPC_E500MC The QEMU e500 board needs to enable CONFIG_E500 to correctly boot. QEMU for ppc64 uses e5500/e6500 emulation, thus CONFIG_PPC_E500MC is required as well. Signed-off-by: David Engraf Signed-off-by: Scott Wood --- arch/powerpc/platforms/85xx/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 9dc1d28975b9..47b389dc4938 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -253,6 +253,8 @@ endif # PPC32 config PPC_QEMU_E500 bool "QEMU generic e500 platform" select DEFAULT_UIMAGE + select E500 + select PPC_E500MC if PPC64 help This option enables support for running as a QEMU guest using QEMU's generic e500 machine. This is not required if you're -- cgit From 5bda6c0eb7cddc9e109c9f79d500288af0704e7a Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 29 Oct 2016 16:27:14 +0200 Subject: powerpc/fsl_soc: improve and simplify fsl_get_sys_freq Use of_property_read_u32 instead of the generic of_get_property to simplify the code. In addition move the declaration of sysfreq into fsl_get_sys_freq because it's private to this function. 
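[Illustration, not part of the patch: the generic shape of this and the following of_property_read_u32() conversions; np, len and val are invented placeholders.]

    int len;
    u32 val = 0;
    const __be32 *prop;

    /* old style: raw pointer into the device tree; caller checks length and byte order */
    prop = of_get_property(np, "clock-frequency", &len);
    if (prop && len == sizeof(*prop))
        val = be32_to_cpup(prop);

    /* new style: one call that validates the property and returns CPU-endian data */
    if (of_property_read_u32(np, "clock-frequency", &val))
        pr_debug("clock-frequency missing or malformed\n");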
Signed-off-by: Heiner Kallweit Signed-off-by: Scott Wood --- arch/powerpc/sysdev/fsl_soc.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index d93056eedcb0..48fc36b3851f 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void) EXPORT_SYMBOL(get_immrbase); -static u32 sysfreq = -1; - u32 fsl_get_sys_freq(void) { + static u32 sysfreq = -1; struct device_node *soc; - const u32 *prop; - int size; if (sysfreq != -1) return sysfreq; @@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void) if (!soc) return -1; - prop = of_get_property(soc, "clock-frequency", &size); - if (!prop || size != sizeof(*prop) || *prop == 0) - prop = of_get_property(soc, "bus-frequency", &size); - - if (prop && size == sizeof(*prop)) - sysfreq = *prop; + of_property_read_u32(soc, "clock-frequency", &sysfreq); + if (sysfreq == -1 || !sysfreq) + of_property_read_u32(soc, "bus-frequency", &sysfreq); of_node_put(soc); return sysfreq; -- cgit From 47a48a92102b7e5a33356f87ca1b4873d2fce2f1 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 29 Oct 2016 16:28:25 +0200 Subject: powerpc/fsl_soc: improve and simplify get_brgfreq Use of_property_read_u32 instead of the generic of_get_property to simplify the code. In addition move the declaration of brgfreq into get_brgfreq because it's private to this function. Signed-off-by: Heiner Kallweit [scottwood: minor whitespace fixes] Signed-off-by: Scott Wood --- arch/powerpc/sysdev/fsl_soc.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 48fc36b3851f..ebccf0bad4a1 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -100,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq); #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) -static u32 brgfreq = -1; - u32 get_brgfreq(void) { + static u32 brgfreq = -1; struct device_node *node; - const unsigned int *prop; - int size; if (brgfreq != -1) return brgfreq; node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); if (node) { - prop = of_get_property(node, "clock-frequency", &size); - if (prop && size == 4) - brgfreq = *prop; - + of_property_read_u32(node, "clock-frequency", &brgfreq); of_node_put(node); return brgfreq; } @@ -129,15 +123,11 @@ u32 get_brgfreq(void) node = of_find_node_by_type(NULL, "qe"); if (node) { - prop = of_get_property(node, "brg-frequency", &size); - if (prop && size == 4) - brgfreq = *prop; - - if (brgfreq == -1 || brgfreq == 0) { - prop = of_get_property(node, "bus-frequency", &size); - if (prop && size == 4) - brgfreq = *prop / 2; - } + of_property_read_u32(node, "brg-frequency", &brgfreq); + if (brgfreq == -1 || !brgfreq) + if (!of_property_read_u32(node, "bus-frequency", + &brgfreq)) + brgfreq /= 2; of_node_put(node); } -- cgit From 0f236988e9ec13d0ee49f580f945d594bd7989a7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 29 Oct 2016 16:29:04 +0200 Subject: powerpc/fsl_soc: improve and simplify get_baudrate Use of_property_read_u32 instead of the generic of_get_property to simplify the code. In addition move the declaration of fs_baudrate into get_baudrate because it's private to this function. 
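[Editorial note, not part of the patch: of_property_read_u32() leaves its output argument untouched on failure, so the static "-1" initialiser keeps working as the "not found yet" sentinel after the conversion, e.g. for get_baudrate():]

    static u32 fs_baudrate = -1;
    /* ... */
    of_property_read_u32(node, "current-speed", &fs_baudrate);
    /* on any error fs_baudrate is still -1, just as with the old of_get_property() path */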
Signed-off-by: Heiner Kallweit Signed-off-by: Scott Wood --- arch/powerpc/sysdev/fsl_soc.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index ebccf0bad4a1..19101f9cfcfc 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -136,10 +136,9 @@ u32 get_brgfreq(void) EXPORT_SYMBOL(get_brgfreq); -static u32 fs_baudrate = -1; - u32 get_baudrate(void) { + static u32 fs_baudrate = -1; struct device_node *node; if (fs_baudrate != -1) @@ -147,12 +146,7 @@ u32 get_baudrate(void) node = of_find_node_by_type(NULL, "serial"); if (node) { - int size; - const unsigned int *prop = of_get_property(node, - "current-speed", &size); - - if (prop) - fs_baudrate = *prop; + of_property_read_u32(node, "current-speed", &fs_baudrate); of_node_put(node); } -- cgit From ee3dd586f163413548de0f0021ef1ff6ca0354d4 Mon Sep 17 00:00:00 2001 From: Andy Fleming Date: Sun, 23 Oct 2016 18:48:38 -0500 Subject: powerpc/85xx: Enable gpio power/reset driver These config changes build: drivers/power/reset/gpio-poweroff.c drivers/power/reset/gpio-restart.c Signed-off-by: Andy Fleming Signed-off-by: Scott Wood --- arch/powerpc/configs/fsl-emb-nonhw.config | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config index 1a61e81ab0cd..cc49c95494da 100644 --- a/arch/powerpc/configs/fsl-emb-nonhw.config +++ b/arch/powerpc/configs/fsl-emb-nonhw.config @@ -44,6 +44,7 @@ CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAME_WARN=1024 CONFIG_FTL=y +CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_HIGH_RES_TIMERS=y @@ -104,8 +105,13 @@ CONFIG_PACKET=y CONFIG_PARTITION_ADVANCED=y CONFIG_PERF_EVENTS=y CONFIG_POSIX_MQUEUE=y +CONFIG_POWER_SUPPLY=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_QNX4FS_FS=m CONFIG_RCU_TRACE=y +CONFIG_RESET_CONTROLLER=y CONFIG_ROOT_NFS=y CONFIG_SYSV_FS=m CONFIG_SYSVIPC=y -- cgit From 02b7d2a833b007bae45be8d4adbae0f2a43d8dc4 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 16 Nov 2016 16:40:14 +0200 Subject: soc/qman: Check ioremap return value Signed-off-by: Madalin Bucur Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman_ccsr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 0cace9e0077e..f4e6e70de259 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node, /* map as cacheable, non-guarded */ void __iomem *tmpp = ioremap_prot(addr, sz, 0); + if (!tmpp) + return -ENOMEM; + memset_io(tmpp, 0, sz); flush_dcache_range((unsigned long)tmpp, (unsigned long)tmpp + sz); -- cgit From 2f28d218d4b82e0eb5c437f25004a4d36e32ab35 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 16 Nov 2016 16:40:15 +0200 Subject: soc/qman: Replace of_get_property() with portable equivalent Use arch portable of_property_read_u32() instead, which takes care of endianness conversions. 
Signed-off-by: Madalin Bucur Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/dpaa_sys.h | 1 + drivers/soc/fsl/qbman/qman_portal.c | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index b63fd72295c6..2eaf3184f61d 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 148614388fca..0c3e8adae647 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -238,9 +238,9 @@ static int qman_portal_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct qm_portal_config *pcfg; struct resource *addr_phys[2]; - const u32 *channel; void __iomem *va; - int irq, len, cpu; + int irq, cpu, err; + u32 val; pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); if (!pcfg) @@ -264,13 +264,13 @@ static int qman_portal_probe(struct platform_device *pdev) return -ENXIO; } - channel = of_get_property(node, "cell-index", &len); - if (!channel || (len != 4)) { + err = of_property_read_u32(node, "cell-index", &val); + if (err) { dev_err(dev, "Can't get %s property 'cell-index'\n", node->full_name); - return -ENXIO; + return err; } - pcfg->channel = *channel; + pcfg->channel = val; pcfg->cpu = -1; irq = platform_get_irq(pdev, 0); if (irq <= 0) { -- cgit From d95cb0d361d509bda3c351c5933904477b103af0 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:16 +0200 Subject: soc/qman: Fix h/w resource cleanup error path handling qman_query_fq*() may return other error codes apart from -ERANGE, in which cases the error handling done by the resource cleanup callers would be wrong. The patch fixes the handling of those cases, and cleans up related code inside the resource cleanup & release handlers (i.e. replace hardcoded fqid value with corresponding define). 
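[Context sketch, paraphrased rather than quoted from the surrounding driver code: both cleanup helpers walk the whole FQID space and rely on -ERANGE as the end-of-range marker, which is why only that value may be treated as "no problems found".]

    struct qman_fq fq = { .fqid = QM_FQID_RANGE_START };
    int err;

    do {
        struct qm_mcr_queryfq_np np;

        err = qman_query_fq_np(&fq, &np);
        if (err == -ERANGE)
            return 0;       /* walked past the last FQID: nothing leaked */
        else if (WARN_ON(err))
            return err;     /* any other failure now propagates to the caller */

        /* ... inspect the FQ and shut it down if it still uses the resource ... */

        fq.fqid++;          /* move to the next FQID */
    } while (1);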
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index deeacd5d123e..e350ed6450ae 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2789,15 +2789,18 @@ static int qpool_cleanup(u32 qp) struct qm_mcr_queryfq_np np; err = qman_query_fq_np(&fq, &np); - if (err) + if (err == -ERANGE) /* FQID range exceeded, found no problems */ return 0; + else if (WARN_ON(err)) + return err; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { struct qm_fqd fqd; err = qman_query_fq(&fq, &fqd); if (WARN_ON(err)) - return 0; + return err; if (qm_fqd_get_chan(&fqd) == qp) { /* The channel is the FQ's target, clean it */ err = qman_shutdown_fq(fq.fqid); @@ -2836,7 +2839,7 @@ static int cgr_cleanup(u32 cgrid) * error, looking for non-OOS FQDs whose CGR is the CGR being released */ struct qman_fq fq = { - .fqid = 1 + .fqid = QM_FQID_RANGE_START }; int err; @@ -2844,15 +2847,18 @@ static int cgr_cleanup(u32 cgrid) struct qm_mcr_queryfq_np np; err = qman_query_fq_np(&fq, &np); - if (err) + if (err == -ERANGE) /* FQID range exceeded, found no problems */ return 0; + else if (WARN_ON(err)) + return err; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { struct qm_fqd fqd; err = qman_query_fq(&fq, &fqd); if (WARN_ON(err)) - return 0; + return err; if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && fqd.cgid == cgrid) { pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", -- cgit From 333e72cf475065a42b7bf33d76214eca45b9acbb Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:17 +0200 Subject: soc/qbman: Fix resource leak on portal probing error path In case init_pcfg() returns with error the CI region must be unmapped too. 
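[Illustration, not part of the patch: the usual reverse-order unwind idiom the fix restores; ce_phys/ce_len/ci_phys/ci_len and init_portal() are invented stand-ins for the portal probe steps.]

    void __iomem *ce, *ci;

    ce = ioremap(ce_phys, ce_len);
    if (!ce)
        goto err_ce;
    ci = ioremap(ci_phys, ci_len);
    if (!ci)
        goto err_ci;
    if (init_portal(ce, ci))
        goto err_init;      /* both mappings are live at this point */
    return 0;

err_init:
    iounmap(ci);            /* the step the old error path skipped */
err_ci:
    iounmap(ce);
err_ce:
    return -ENXIO;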
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/bman_portal.c | 17 ++++++++++++----- drivers/soc/fsl/qbman/qman_portal.c | 17 ++++++++++++----- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 6579cc18811a..dd3f5d7617d2 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c @@ -146,15 +146,19 @@ static int bman_portal_probe(struct platform_device *pdev) pcfg->irq = irq; va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CE failed\n"); goto err_ioremap1; + } pcfg->addr_virt[DPAA_PORTAL_CE] = va; va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), _PAGE_GUARDED | _PAGE_NO_CACHE); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CI failed\n"); goto err_ioremap2; + } pcfg->addr_virt[DPAA_PORTAL_CI] = va; @@ -170,8 +174,10 @@ static int bman_portal_probe(struct platform_device *pdev) spin_unlock(&bman_lock); pcfg->cpu = cpu; - if (!init_pcfg(pcfg)) - goto err_ioremap2; + if (!init_pcfg(pcfg)) { + dev_err(dev, "portal init failed\n"); + goto err_portal_init; + } /* clear irq affinity if assigned cpu is offline */ if (!cpu_online(cpu)) @@ -179,10 +185,11 @@ static int bman_portal_probe(struct platform_device *pdev) return 0; +err_portal_init: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); err_ioremap2: iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); err_ioremap1: - dev_err(dev, "ioremap failed\n"); return -ENXIO; } diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 0c3e8adae647..6651168333e1 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -280,15 +280,19 @@ static int qman_portal_probe(struct platform_device *pdev) pcfg->irq = irq; va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CE failed\n"); goto err_ioremap1; + } pcfg->addr_virt[DPAA_PORTAL_CE] = va; va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), _PAGE_GUARDED | _PAGE_NO_CACHE); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CI failed\n"); goto err_ioremap2; + } pcfg->addr_virt[DPAA_PORTAL_CI] = va; @@ -306,8 +310,10 @@ static int qman_portal_probe(struct platform_device *pdev) spin_unlock(&qman_lock); pcfg->cpu = cpu; - if (!init_pcfg(pcfg)) - goto err_ioremap2; + if (!init_pcfg(pcfg)) { + dev_err(dev, "portal init failed\n"); + goto err_portal_init; + } /* clear irq affinity if assigned cpu is offline */ if (!cpu_online(cpu)) @@ -315,10 +321,11 @@ static int qman_portal_probe(struct platform_device *pdev) return 0; +err_portal_init: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); err_ioremap2: iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); err_ioremap1: - dev_err(dev, "ioremap failed\n"); return -ENXIO; } -- cgit From 9f3670e8f6df6557fefe3ae0e626189c8c30c74c Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:18 +0200 Subject: soc/qman: Fix struct qm_fqd set accessor for context_a context_a.hi is 32bit Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- include/soc/fsl/qman.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 37f3eb001a16..1405810e4050 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -411,7 +411,7 @@ static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) static inline void 
qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) { - fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr)); + fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr)); fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr)); } -- cgit From b6e969dbaec0fe6633a756468e74151ebb1e6c49 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:19 +0200 Subject: soc/qman: Fix direct access to fd's addr_lo, use proper accesor Use the proper accessor to get the FD address. Accessing the internal field "addr_lo" directly is not portable and error prone. Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index e350ed6450ae..96b0f004e007 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1239,8 +1239,8 @@ static int qman_create_portal(struct qman_portal *portal, /* special handling, drain just in case it's a few FQRNIs */ const union qm_mr_entry *e = qm_mr_current(p); - dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x", - e->verb, e->ern.rc, e->ern.fd.addr_lo); + dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n", + e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd)); goto fail_dqrr_mr_empty; } /* Success */ -- cgit From 57e68a5e23570ff3ab76772a974724c226fcee7d Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:20 +0200 Subject: soc/qman: test: Fix implementation of fd_cmp() This function must only return the truth value of whether two frame descriptors are different or not. It does NOT have to compute some obscure difference between fd fields and return it as an int, making sparse complain about type conversions in the process. Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman_test_api.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c index 6880ff17f45e..dba6a80622ed 100644 --- a/drivers/soc/fsl/qbman/qman_test_api.c +++ b/drivers/soc/fsl/qbman/qman_test_api.c @@ -90,22 +90,15 @@ static void fd_inc(struct qm_fd *fd) } /* The only part of the 'fd' we can't memcmp() is the ppid */ -static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b) { - int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 
0 : -1; + bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b); - if (!r) { - enum qm_fd_format fmt_a, fmt_b; + neq |= qm_fd_get_format(a) != qm_fd_get_format(b); + neq |= a->cfg != b->cfg; + neq |= a->cmd != b->cmd; - fmt_a = qm_fd_get_format(a); - fmt_b = qm_fd_get_format(b); - r = fmt_a - fmt_b; - } - if (!r) - r = a->cfg - b->cfg; - if (!r) - r = a->cmd - b->cmd; - return r; + return neq; } /* test */ @@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { - if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { + if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) { pr_err("BADNESS: dequeued frame doesn't match;\n"); return qman_cb_dqrr_consume; } fd_inc(&fd_dq); - if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) { sdqcr_complete = 1; wake_up(&waitqueue); } -- cgit From 0fbeac3b0f67a50d45e8a7d72ee3e0301ac7e9a9 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:21 +0200 Subject: soc/qman: Don't add a new platform device for dma mapping The qman portals are platform devices themselves, so they should handle dma mappings. Creating a dummy platform device in order to support dma mapping operations is not justified (and not portable). Instead, do the mapping against the first portal that has been initialised. Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 31 ++++++++++--------------------- drivers/soc/fsl/qbman/qman_portal.c | 11 +++++++++++ drivers/soc/fsl/qbman/qman_priv.h | 1 + 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 96b0f004e007..4ceb9a1da7ee 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -962,8 +962,6 @@ struct qman_portal { u32 sdqcr; /* probing time config params for cpu-affine portals */ const struct qm_portal_config *config; - /* needed for providing a non-NULL device to dma_map_***() */ - struct platform_device *pdev; /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ struct qman_cgrs *cgrs; /* linked-list of CSCN handlers. 
*/ @@ -1133,7 +1131,6 @@ static int qman_create_portal(struct qman_portal *portal, const struct qman_cgrs *cgrs) { struct qm_portal *p; - char buf[16]; int ret; u32 isdr; @@ -1196,15 +1193,6 @@ static int qman_create_portal(struct qman_portal *portal, portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; - sprintf(buf, "qportal-%d", c->channel); - portal->pdev = platform_device_alloc(buf, -1); - if (!portal->pdev) - goto fail_devalloc; - if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) - goto fail_devadd; - ret = platform_device_add(portal->pdev); - if (ret) - goto fail_devadd; isdr = 0xffffffff; qm_out(p, QM_REG_ISDR, isdr); portal->irq_sources = 0; @@ -1256,10 +1244,6 @@ fail_eqcr_empty: fail_affinity: free_irq(c->irq, portal); fail_irq: - platform_device_del(portal->pdev); -fail_devadd: - platform_device_put(portal->pdev); -fail_devalloc: kfree(portal->cgrs); fail_cgrs: qm_mc_finish(p); @@ -1321,9 +1305,6 @@ static void qman_destroy_portal(struct qman_portal *qm) qm_dqrr_finish(&qm->p); qm_eqcr_finish(&qm->p); - platform_device_del(qm->pdev); - platform_device_put(qm->pdev); - qm->config = NULL; } @@ -1817,8 +1798,16 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) memset(&mcc->initfq.fqd.context_a, 0, sizeof(mcc->initfq.fqd.context_a)); } else { - phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), - DMA_TO_DEVICE); + struct qman_portal *p = qman_dma_portal; + + phys_fq = dma_map_single(p->config->dev, fq, + sizeof(*fq), DMA_TO_DEVICE); + if (dma_mapping_error(p->config->dev, phys_fq)) { + dev_err(p->config->dev, "dma_mapping failed\n"); + ret = -EIO; + goto out; + } + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); } } diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 6651168333e1..e6da2ea78554 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -30,6 +30,8 @@ #include "qman_priv.h" +struct qman_portal *qman_dma_portal; + /* Enable portal interupts (as opposed to polling mode) */ #define CONFIG_FSL_DPA_PIRQ_SLOW 1 #define CONFIG_FSL_DPA_PIRQ_FAST 1 @@ -150,6 +152,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) /* all assigned portals are initialized now */ qman_init_cgr_all(); } + + if (!qman_dma_portal) + qman_dma_portal = p; + spin_unlock(&qman_lock); dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); @@ -310,6 +316,11 @@ static int qman_portal_probe(struct platform_device *pdev) spin_unlock(&qman_lock); pcfg->cpu = cpu; + if (dma_set_mask(dev, DMA_BIT_MASK(40))) { + dev_err(dev, "dma_set_mask() failed\n"); + goto err_portal_init; + } + if (!init_pcfg(pcfg)) { dev_err(dev, "portal init failed\n"); goto err_portal_init; diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 5cf821e623a9..59b4b463d884 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -367,5 +367,6 @@ int qman_alloc_fq_table(u32 num_fqids); #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) extern struct qman_portal *affine_portals[NR_CPUS]; +extern struct qman_portal *qman_dma_portal; const struct qm_portal_config *qman_get_qm_portal_config( struct qman_portal *portal); -- cgit From 021ba010663146305b0ba51246a41223d38943af Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:22 +0200 Subject: soc/qman: test: Don't use dummy platform 
device for dma mapping Replace dummy platform device hack with a reference to a portal's platform device, in order to dma map the test frame for this small unit test. The 2 qman symbols need to be exported because this self test is a kernel module. Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 1 + drivers/soc/fsl/qbman/qman_portal.c | 1 + drivers/soc/fsl/qbman/qman_test_stash.c | 31 ++++++++++++++++++++----------- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 4ceb9a1da7ee..307c818e14ab 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2711,6 +2711,7 @@ const struct qm_portal_config *qman_get_qm_portal_config( { return portal->config; } +EXPORT_SYMBOL(qman_get_qm_portal_config); struct gen_pool *qm_fqalloc; /* FQID allocator */ struct gen_pool *qm_qpalloc; /* pool-channel allocator */ diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index e6da2ea78554..c9a9bcb1aea2 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -31,6 +31,7 @@ #include "qman_priv.h" struct qman_portal *qman_dma_portal; +EXPORT_SYMBOL(qman_dma_portal); /* Enable portal interupts (as opposed to polling mode) */ #define CONFIG_FSL_DPA_PIRQ_SLOW 1 diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c index 43cf66ba42f5..f8d25faf4d8a 100644 --- a/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -191,6 +191,9 @@ static void *__frame_ptr; static u32 *frame_ptr; static dma_addr_t frame_dma; +/* needed for dma_map*() */ +static const struct qm_portal_config *pcfg; + /* the main function waits on this */ static DECLARE_WAIT_QUEUE_HEAD(queue); @@ -210,16 +213,14 @@ static int allocate_frame_data(void) { u32 lfsr = HP_FIRST_WORD; int loop; - struct platform_device *pdev = platform_device_alloc("foobar", -1); - if (!pdev) { - pr_crit("platform_device_alloc() failed"); - return -EIO; - } - if (platform_device_add(pdev)) { - pr_crit("platform_device_add() failed"); + if (!qman_dma_portal) { + pr_crit("portal not available\n"); return -EIO; } + + pcfg = qman_get_qm_portal_config(qman_dma_portal); + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); if (!__frame_ptr) return -ENOMEM; @@ -229,15 +230,22 @@ static int allocate_frame_data(void) frame_ptr[loop] = lfsr; lfsr = do_lfsr(lfsr); } - frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, + + frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS, DMA_BIDIRECTIONAL); - platform_device_del(pdev); - platform_device_put(pdev); + if (dma_mapping_error(pcfg->dev, frame_dma)) { + pr_crit("dma mapping failure\n"); + kfree(__frame_ptr); + return -EIO; + } + return 0; } static void deallocate_frame_data(void) { + dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); kfree(__frame_ptr); } @@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler, int loop; if (qm_fd_addr_get64(fd) != handler->addr) { - pr_crit("bad frame address"); + pr_crit("bad frame address, [%llX != %llX]\n", + qm_fd_addr_get64(fd), handler->addr); return -EIO; } for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { -- cgit From e5caf693ac6ac498ef05c7c1947024d5b678626f Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:23 +0200 Subject: soc/qman: Remove redundant checks from qman_create_cgr() opts is checked 
redundantly. Move local_opts declaration inside its usage scope. Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 307c818e14ab..4a1697eefcdf 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2294,7 +2294,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, struct qm_mcc_initcgr *opts) { struct qm_mcr_querycgr cgr_state; - struct qm_mcc_initcgr local_opts = {}; int ret; struct qman_portal *p; @@ -2316,11 +2315,12 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, spin_lock(&p->cgr_lock); if (opts) { + struct qm_mcc_initcgr local_opts = *opts; + ret = qman_query_cgr(cgr, &cgr_state); if (ret) goto out; - if (opts) - local_opts = *opts; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); @@ -2331,7 +2331,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; /* send init if flags indicate so */ - if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + if (flags & QMAN_CGR_FLAG_USE_INIT) ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); else -- cgit From 7ff07da015c290cd2b0627c53267fbc1336cf055 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:24 +0200 Subject: soc/qman: Remove unused struct qm_mcc* layouts 1. qm_mcc_querywq layout not used for now, so drop it; 2. queryfq, queryfq_np and alterfq are used only for accesses to the 'fqid' field, so replace these with a generic 'fq' layout. As a consequence, 'querycgr' turns into 'cgr' following the same reasoning above and for consistent naming. 
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 58 ++++++++++++--------------------------- drivers/soc/fsl/qbman/qman_priv.h | 6 ---- 2 files changed, 18 insertions(+), 46 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 4a1697eefcdf..0d2e3bcaaf9e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -183,41 +183,22 @@ struct qm_mr { }; /* MC (Management Command) command */ -/* "Query FQ" */ -struct qm_mcc_queryfq { +/* "FQ" command layout */ +struct qm_mcc_fq { u8 _ncw_verb; u8 __reserved1[3]; u32 fqid; /* 24-bit */ u8 __reserved2[56]; } __packed; -/* "Alter FQ State Commands " */ -struct qm_mcc_alterfq { - u8 _ncw_verb; - u8 __reserved1[3]; - u32 fqid; /* 24-bit */ - u8 __reserved2; - u8 count; /* number of consecutive FQID */ - u8 __reserved3[10]; - u32 context_b; /* frame queue context b */ - u8 __reserved4[40]; -} __packed; -/* "Query CGR" */ -struct qm_mcc_querycgr { +/* "CGR" command layout */ +struct qm_mcc_cgr { u8 _ncw_verb; u8 __reserved1[30]; u8 cgid; u8 __reserved2[32]; }; -struct qm_mcc_querywq { - u8 _ncw_verb; - u8 __reserved; - /* select channel if verb != QUERYWQ_DEDICATED */ - u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ - u8 __reserved2[60]; -} __packed; - #define QM_MCC_VERB_VBIT 0x80 #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ #define QM_MCC_VERB_INITFQ_PARKED 0x40 @@ -243,12 +224,9 @@ union qm_mc_command { u8 __reserved[63]; }; struct qm_mcc_initfq initfq; - struct qm_mcc_queryfq queryfq; - struct qm_mcc_alterfq alterfq; struct qm_mcc_initcgr initcgr; - struct qm_mcc_querycgr querycgr; - struct qm_mcc_querywq querywq; - struct qm_mcc_queryfq_np queryfq_np; + struct qm_mcc_fq fq; + struct qm_mcc_cgr cgr; }; /* MC (Management Command) result */ @@ -1777,7 +1755,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) mcc = qm_mc_start(&p->p); if (opts) mcc->initfq = *opts; - mcc->initfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; mcc->initfq.count = 0; /* * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a @@ -1873,7 +1851,7 @@ int qman_schedule_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(p->config->dev, "ALTER_SCHED timeout\n"); @@ -1916,7 +1894,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); @@ -1995,7 +1973,7 @@ int qman_oos_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2021,7 +1999,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) int ret = 0; mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2047,7 +2025,7 @@ static int qman_query_fq_np(struct qman_fq *fq, int ret = 0; mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; + mcc->fq.fqid = fq->fqid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = 
-ETIMEDOUT; @@ -2075,7 +2053,7 @@ static int qman_query_cgr(struct qman_cgr *cgr, int ret = 0; mcc = qm_mc_start(&p->p); - mcc->querycgr.cgid = cgr->cgrid; + mcc->cgr.cgid = cgr->cgrid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2526,7 +2504,7 @@ static int qman_shutdown_fq(u32 fqid) dev = p->config->dev; /* Determine the state of the FQID */ mcc = qm_mc_start(&p->p); - mcc->queryfq_np.fqid = fqid; + mcc->fq.fqid = fqid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2541,7 +2519,7 @@ static int qman_shutdown_fq(u32 fqid) /* Query which channel the FQ is using */ mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fqid; + mcc->fq.fqid = fqid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ timeout\n"); @@ -2561,7 +2539,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_PARKED: orl_empty = 0; mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + mcc->fq.fqid = fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2656,7 +2634,7 @@ static int qman_shutdown_fq(u32 fqid) cpu_relax(); } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + mcc->fq.fqid = fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2676,7 +2654,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_RETIRED: /* Send OOS Command */ mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + mcc->fq.fqid = fqid; qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 59b4b463d884..5606d74c21d4 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -90,12 +90,6 @@ static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) } /* "Query FQ Non-Programmable Fields" */ -struct qm_mcc_queryfq_np { - u8 _ncw_verb; - u8 __reserved1[3]; - u32 fqid; /* 24-bit */ - u8 __reserved2[56]; -} __packed; struct qm_mcr_queryfq_np { u8 verb; -- cgit From d6753c7e65e70e90ea991797059f4204b66cf523 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:25 +0200 Subject: soc/qman: Fix accesses to fqid, cleanup Preventively mask every access to the 'fqid' h/w field, since it is defined as a 24-bit field, for every h/w descriptor. Add generic accessors for this field to ensure correct access. 
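[Illustration, not part of the patch: the new accessors make the 24-bit field width explicit and clamp whatever the caller passes; the values and the mcc/fqid variables are invented.]

    /* QM_FQID_MASK is GENMASK(23, 0), i.e. 0x00ffffff */
    qm_fqid_set(&mcc->fq, 0xaa123456);   /* stored as 0x123456 */
    fqid = qm_fqid_get(&mcc->fq);        /* reads back 0x123456 */

Any stray upper bits from a caller are thus dropped before they reach the hardware descriptor.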
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 32 ++++++++++++++++---------------- include/soc/fsl/qman.h | 5 +++++ 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 0d2e3bcaaf9e..06626bee0e7e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -471,7 +471,7 @@ static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) { DPAA_ASSERT(eqcr->busy); DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); - DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); + DPAA_ASSERT(!(eqcr->cursor->fqid & ~QM_FQID_MASK)); DPAA_ASSERT(eqcr->available >= 1); } @@ -1387,7 +1387,7 @@ static void qm_mr_process_task(struct work_struct *work) case QM_MR_VERB_FQRN: case QM_MR_VERB_FQRL: /* Lookup in the retirement table */ - fq = fqid_to_fq(msg->fq.fqid); + fq = fqid_to_fq(qm_fqid_get(&msg->fq)); if (WARN_ON(!fq)) break; fq_state_change(p, fq, msg, verb); @@ -1755,7 +1755,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) mcc = qm_mc_start(&p->p); if (opts) mcc->initfq = *opts; - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); mcc->initfq.count = 0; /* * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a @@ -1851,7 +1851,7 @@ int qman_schedule_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(p->config->dev, "ALTER_SCHED timeout\n"); @@ -1894,7 +1894,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) goto out; } mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); @@ -1937,7 +1937,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) msg.verb = QM_MR_VERB_FQRNI; msg.fq.fqs = mcr->alterfq.fqs; - msg.fq.fqid = fq->fqid; + qm_fqid_set(&msg.fq, fq->fqid); msg.fq.contextB = fq_to_tag(fq); fq->cb.fqs(p, fq, &msg); } @@ -1973,7 +1973,7 @@ int qman_oos_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -1999,7 +1999,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) int ret = 0; mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2025,7 +2025,7 @@ static int qman_query_fq_np(struct qman_fq *fq, int ret = 0; mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2206,7 +2206,7 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) if (unlikely(!eq)) goto out; - eq->fqid = fq->fqid; + qm_fqid_set(eq, fq->fqid); eq->tag = fq_to_tag(fq); eq->fd = *fd; @@ -2468,7 +2468,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, } while (wait && !dqrr); while (dqrr) { - if (dqrr->fqid == fqid && (dqrr->stat & s)) + if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) found = 1; qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); qm_dqrr_pvb_update(p); @@ -2504,7 
+2504,7 @@ static int qman_shutdown_fq(u32 fqid) dev = p->config->dev; /* Determine the state of the FQID */ mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2519,7 +2519,7 @@ static int qman_shutdown_fq(u32 fqid) /* Query which channel the FQ is using */ mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ timeout\n"); @@ -2539,7 +2539,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_PARKED: orl_empty = 0; mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2634,7 +2634,7 @@ static int qman_shutdown_fq(u32 fqid) cpu_relax(); } mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2654,7 +2654,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_RETIRED: /* Send OOS Command */ mcc = qm_mc_start(&p->p); - mcc->fq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 1405810e4050..f3fa53fef421 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -262,6 +262,11 @@ struct qm_dqrr_entry { #define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ #define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ +/* 'fqid' is a 24-bit field in every h/w descriptor */ +#define QM_FQID_MASK GENMASK(23, 0) +#define qm_fqid_set(p, v) ((p)->fqid = ((v) & QM_FQID_MASK)) +#define qm_fqid_get(p) ((p)->fqid & QM_FQID_MASK) + /* "ERN Message Response" */ /* "FQ State Change Notification" */ union qm_mr_entry { -- cgit From b539945286bdd2d7c4cb92ef1237873b1c00c778 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:26 +0200 Subject: soc/qman: Drop unused field from eqcr/dqrr descriptors ORP ("Order Restoration Point") mechanism not supported. 
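Note that the dropped ORP words are replaced by reserved padding of identical size, so the offsets of the fields that follow (fqid, tag, fd) in these hardware-defined layouts are unchanged. A compile-time check is a cheap way to guard that kind of edit; a stand-alone sketch with hypothetical names (the real structs are __packed, which does not change the arithmetic here):

#include <stddef.h>
#include <stdint.h>

struct example_eqcr_entry {
	uint8_t  verb;
	uint8_t  dca;
	uint16_t seqnum;
	uint8_t  reserved[4];	/* was a 32-bit ORP word; same size */
	uint32_t fqid;		/* 24-bit; must stay at byte offset 8 */
	uint32_t tag;
};

_Static_assert(offsetof(struct example_eqcr_entry, fqid) == 8,
	       "reserved padding must preserve the hardware layout");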
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 3 +-- include/soc/fsl/qman.h | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 06626bee0e7e..c679e0285ccd 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -141,7 +141,7 @@ struct qm_eqcr_entry { u8 _ncw_verb; /* writes to this are non-coherent */ u8 dca; u16 seqnum; - u32 orp; /* 24-bit */ + u8 __reserved[4]; u32 fqid; /* 24-bit */ u32 tag; struct qm_fd fd; @@ -470,7 +470,6 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) { DPAA_ASSERT(eqcr->busy); - DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); DPAA_ASSERT(!(eqcr->cursor->fqid & ~QM_FQID_MASK)); DPAA_ASSERT(eqcr->available >= 1); } diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index f3fa53fef421..20bbc716a6a8 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -279,8 +279,7 @@ union qm_mr_entry { u8 dca; u16 seqnum; u8 rc; /* Rej Code: 8-bit */ - u8 orp_hi; /* ORP: 24-bit */ - u16 orp_lo; + u8 __reserved[3]; u32 fqid; /* 24-bit */ u32 tag; struct qm_fd fd; -- cgit From 5a42f1ecb304789f75a017b2a36bc6d1dac6dcf2 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:27 +0200 Subject: soc/qbman: Handle endianness of qm/bm_in/out() Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/bman.c | 6 +++--- drivers/soc/fsl/qbman/qman.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index 48b7eb698689..a3d6d7cfa929 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c @@ -167,12 +167,12 @@ struct bm_portal { /* Cache-inhibited register access. */ static inline u32 bm_in(struct bm_portal *p, u32 offset) { - return __raw_readl(p->addr.ci + offset); + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); } static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) { - __raw_writel(val, p->addr.ci + offset); + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); } /* Cache Enabled Portal Access */ @@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) { - return __raw_readl(p->addr.ce + offset); + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); } struct bman_portal { diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index c679e0285ccd..936aee19747d 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -321,12 +321,12 @@ struct qm_portal { /* Cache-inhibited register access. 
*/ static inline u32 qm_in(struct qm_portal *p, u32 offset) { - return __raw_readl(p->addr.ci + offset); + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); } static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) { - __raw_writel(val, p->addr.ci + offset); + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); } /* Cache Enabled Portal Access */ @@ -342,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) { - return __raw_readl(p->addr.ce + offset); + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); } /* --- EQCR API --- */ -- cgit From efe848cdb724be2d0cc61f6329997f9a7cd7948f Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:28 +0200 Subject: soc/qman: Change remaining contextB into context_b There are multiple occurences of both contextB and context_b in different h/w descriptors, referring to the same descriptor field known as "Context B". Stick with the "context_b" naming, for obvious reasons including consistency (see also context_a). Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 12 ++++++------ include/soc/fsl/qman.h | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 936aee19747d..8386acc1d37d 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1395,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work) break; case QM_MR_VERB_FQPN: /* Parked */ - fq = tag_to_fq(msg->fq.contextB); + fq = tag_to_fq(msg->fq.context_b); fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, msg); @@ -1494,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* - * VDQCR: don't trust contextB as the FQ may have + * VDQCR: don't trust context_b as the FQ may have * been configured for h/w consumption and we're * draining it post-retirement. */ @@ -1520,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) clear_vdqcr(p, fq); } else { - /* SDQCR: contextB points to the FQ */ - fq = tag_to_fq(dq->contextB); + /* SDQCR: context_b points to the FQ */ + fq = tag_to_fq(dq->context_b); /* Now let the callback do its stuff */ res = fq->cb.dqrr(p, fq, dq); /* @@ -1757,7 +1757,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) qm_fqid_set(&mcc->fq, fq->fqid); mcc->initfq.count = 0; /* - * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a * demux pointer. Otherwise, the caller-provided value is allowed to * stand, don't overwrite it. 
*/ @@ -1937,7 +1937,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) msg.verb = QM_MR_VERB_FQRNI; msg.fq.fqs = mcr->alterfq.fqs; qm_fqid_set(&msg.fq, fq->fqid); - msg.fq.contextB = fq_to_tag(fq); + msg.fq.context_b = fq_to_tag(fq); fq->cb.fqs(p, fq, &msg); } } else if (res == QM_MCR_RESULT_PENDING) { diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 20bbc716a6a8..c80eccac64f6 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -248,7 +248,7 @@ struct qm_dqrr_entry { u8 tok; u8 __reserved2[3]; u32 fqid; /* 24-bit */ - u32 contextB; + u32 context_b; struct qm_fd fd; u8 __reserved4[32]; } __packed; @@ -290,7 +290,7 @@ union qm_mr_entry { u8 fqs; /* Frame Queue Status */ u8 __reserved1[6]; u32 fqid; /* 24-bit */ - u32 contextB; + u32 context_b; u8 __reserved2[48]; } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ }; @@ -658,7 +658,7 @@ struct qman_cgr; /* * This enum, and the callback type that returns it, are used when handling * dequeued frames via DQRR. Note that for "null" callbacks registered with the - * portal object (for handling dequeues that do not demux because contextB is + * portal object (for handling dequeues that do not demux because context_b is * NULL), the return value *MUST* be qman_cb_dqrr_consume. */ enum qman_cb_dqrr_result { @@ -863,11 +863,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to * pre-existing frame-queues that aren't to be otherwise interfered with, it * prevents all other modifications to the frame queue. The TO_DCPORTAL flag - * causes the driver to honour any contextB modifications requested in the + * causes the driver to honour any context_b modifications requested in the * qm_init_fq() API, as this indicates the frame queue will be consumed by a * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by - * software portals, the contextB field is controlled by the driver and can't be - * modified by the caller. + * software portals, the context_b field is controlled by the driver and can't + * be modified by the caller. 
*/ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); -- cgit From 496bfa11de2e6805b63168d45c2f92edbeae54e2 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:29 +0200 Subject: soc/qman: Clean up CGR CSCN target update operations Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 37 +++++++++++++++++++++++-------------- include/soc/fsl/qman.h | 4 ++-- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 8386acc1d37d..acee99da71a7 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2248,7 +2248,23 @@ out: } #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) -#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) + +/* congestion state change notification target update control */ +static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) +{ + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT | pi; + else + cgr->cscn_targ = val | QM_CGR_TARG_PORTAL(pi); +} + +static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) +{ + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = pi; + else + cgr->cscn_targ = val & ~QM_CGR_TARG_PORTAL(pi); +} static u8 qman_cgr_cpus[CGR_NUM]; @@ -2298,13 +2314,8 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, if (ret) goto out; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = - QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); - else - /* Overwrite TARG */ - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | - TARG_MASK(p); + qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), + cgr_state.cgr.cscn_targ); local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; /* send init if flags indicate so */ @@ -2371,13 +2382,11 @@ int qman_delete_cgr(struct qman_cgr *cgr) list_add(&cgr->node, &p->cgr_cbs); goto release_lock; } - /* Overwrite TARG */ + local_opts.we_mask = QM_CGR_WE_CSCN_TARG; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); - else - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & - ~(TARG_MASK(p)); + qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), + cgr_state.cgr.cscn_targ); + ret = qm_modify_cgr(cgr, 0, &local_opts); if (ret) /* add back to the list */ diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index c80eccac64f6..d01d5a358945 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -553,8 +553,8 @@ struct __qm_mc_cgr { u8 cscn_en; /* boolean, use QM_CGR_EN */ union { struct { - u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ - u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ + u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */ + u16 cscn_targ_dcp_low; }; u32 cscn_targ; /* use QM_CGR_TARG_* */ }; -- cgit From 82de5797a26087c651a06de327e6aa9ea69d5d7f Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Mon, 21 Nov 2016 22:36:40 +0530 Subject: powerpc: Remove extraneous header from asm-prototypes.h Commit 03465f899bda ("powerpc: Use kprobe blacklist for exception handlers") removed __kprobes annotation from some of the prototypes, but left the kprobes header include directive unchanged. Remove it as it is no longer needed. Signed-off-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index d1492736d852..dfef1174663e 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -13,8 +13,6 @@ */ #include -#include - #include /* SMP */ -- cgit From 6cc89bad60a673a24386f1ada83de8a068a78909 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Mon, 21 Nov 2016 22:36:41 +0530 Subject: powerpc/kprobes: Invoke handlers directly Invoke the kprobe handlers directly rather than through notify_die(), to reduce path taken for handling kprobes. Similar to commit 6f6343f53d13 ("kprobes/x86: Call exception handlers directly from do_int3/do_debug"). While at it, rename post_kprobe_handler() to kprobe_post_handler() for more uniform naming. Reported-by: Masami Hiramatsu Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kprobes.h | 7 +++++++ arch/powerpc/kernel/kprobes.c | 29 +++++++---------------------- arch/powerpc/kernel/traps.c | 13 +++++++++++++ 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 2c9759bdb63b..97b8c1f83453 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -32,6 +32,7 @@ #include #include +#ifdef CONFIG_KPROBES #define __ARCH_WANT_KPROBES_INSN_SLOT struct pt_regs; @@ -127,5 +128,11 @@ struct kprobe_ctlblk { extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); +extern int kprobe_handler(struct pt_regs *regs); +extern int kprobe_post_handler(struct pt_regs *regs); +#else +static inline int kprobe_handler(struct pt_regs *regs) { return 0; } +static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; } +#endif /* CONFIG_KPROBES */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_KPROBES_H */ diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 9479d8e360cf..ad108b842669 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -140,13 +140,16 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, regs->link = (unsigned long)kretprobe_trampoline; } -static int __kprobes kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; unsigned int *addr = (unsigned int *)regs->nip; struct kprobe_ctlblk *kcb; + if (user_mode(regs)) + return 0; + /* * We don't want to be preempted for the entire * duration of kprobe processing @@ -359,12 +362,12 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. 
*/ -static int __kprobes post_kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_post_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (!cur) + if (!cur || user_mode(regs)) return 0; /* make sure we got here for instruction we have a kprobe on */ @@ -470,25 +473,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { - struct die_args *args = (struct die_args *)data; - int ret = NOTIFY_DONE; - - if (args->regs && user_mode(args->regs)) - return ret; - - switch (val) { - case DIE_BPT: - if (kprobe_handler(args->regs)) - ret = NOTIFY_STOP; - break; - case DIE_SSTEP: - if (post_kprobe_handler(args->regs)) - ret = NOTIFY_STOP; - break; - default: - break; - } - return ret; + return NOTIFY_DONE; } unsigned long arch_deref_entry_point(void *entry) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 3eb20d16c67c..b922c6f4a2fe 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -64,6 +64,7 @@ #include #include #include +#include #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) int (*__debugger)(struct pt_regs *regs) __read_mostly; @@ -826,6 +827,9 @@ void single_step_exception(struct pt_regs *regs) clear_single_step(regs); + if (kprobe_post_handler(regs)) + return; + if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) goto bail; @@ -1179,6 +1183,9 @@ void program_check_exception(struct pt_regs *regs) if (debugger_bpt(regs)) goto bail; + if (kprobe_handler(regs)) + goto bail; + /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) @@ -1747,6 +1754,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status) return; } + if (kprobe_post_handler(regs)) + return; + if (notify_die(DIE_SSTEP, "block_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) { return; @@ -1761,6 +1771,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status) /* Clear the instruction completion event */ mtspr(SPRN_DBSR, DBSR_IC); + if (kprobe_post_handler(regs)) + return; + if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) { return; -- cgit From 8004ca995c24a078ae362f6dd9f8e1056e65c733 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 19 Nov 2016 14:42:14 +0100 Subject: powerpc/pseries/ibmebus: Remove legacy suspend/resume support There are no ibmebus driver that make use of legacy suspend/resume. This patch removes the support for it from ibmebus framework, new ibmebus driver (as unlikely as they are) wanting to use suspend/resume should use dev_pm_ops. Since there aren't any special bus specific things to do during suspend/resume and since the PM core will automatically fallback directly to using the device's PM ops if no bus PM ops are specified there is no need to have any special ibmebus PM ops at all. 
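For a driver that does need suspend/resume after this change, the dev_pm_ops route looks roughly like the sketch below; the driver name and callbacks are hypothetical, and registration still goes through the bus's usual helper (ibmebus_register_driver() takes a struct platform_driver):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hypothetical device */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore the hypothetical device */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* PM core uses these directly */
	},
};

Because the bus no longer defines its own dev_pm_ops, the PM core walks straight to the driver's callbacks, which is exactly the fallback behaviour the commit message relies on.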
Signed-off-by: Lars-Peter Clausen Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/ibmebus.c | 298 ------------------------------- 1 file changed, 298 deletions(-) diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 35f5244782d9..614c28537141 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -423,303 +423,6 @@ static struct device_attribute ibmebus_bus_device_attrs[] = { __ATTR_NULL }; -#ifdef CONFIG_PM_SLEEP -static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) -{ - struct platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->suspend) - ret = drv->suspend(of_dev, mesg); - return ret; -} - -static int ibmebus_bus_legacy_resume(struct device *dev) -{ - struct platform_device *of_dev = to_platform_device(dev); - struct platform_driver *drv = to_platform_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->resume) - ret = drv->resume(of_dev); - return ret; -} - -static int ibmebus_bus_pm_prepare(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (drv && drv->pm && drv->pm->prepare) - ret = drv->pm->prepare(dev); - - return ret; -} - -static void ibmebus_bus_pm_complete(struct device *dev) -{ - struct device_driver *drv = dev->driver; - - if (drv && drv->pm && drv->pm->complete) - drv->pm->complete(dev); -} - -#ifdef CONFIG_SUSPEND - -static int ibmebus_bus_pm_suspend(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend) - ret = drv->pm->suspend(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); - } - - return ret; -} - -static int ibmebus_bus_pm_suspend_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend_noirq) - ret = drv->pm->suspend_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_resume(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume) - ret = drv->pm->resume(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_resume_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume_noirq) - ret = drv->pm->resume_noirq(dev); - } - - return ret; -} - -#else /* !CONFIG_SUSPEND */ - -#define ibmebus_bus_pm_suspend NULL -#define ibmebus_bus_pm_resume NULL -#define ibmebus_bus_pm_suspend_noirq NULL -#define ibmebus_bus_pm_resume_noirq NULL - -#endif /* !CONFIG_SUSPEND */ - -#ifdef CONFIG_HIBERNATE_CALLBACKS - -static int ibmebus_bus_pm_freeze(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze) - ret = drv->pm->freeze(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); - } - - return ret; -} - -static int ibmebus_bus_pm_freeze_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze_noirq) - ret = drv->pm->freeze_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_thaw(struct device *dev) -{ - struct 
device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw) - ret = drv->pm->thaw(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_thaw_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw_noirq) - ret = drv->pm->thaw_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_poweroff(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff) - ret = drv->pm->poweroff(dev); - } else { - ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); - } - - return ret; -} - -static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff_noirq) - ret = drv->pm->poweroff_noirq(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_restore(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore) - ret = drv->pm->restore(dev); - } else { - ret = ibmebus_bus_legacy_resume(dev); - } - - return ret; -} - -static int ibmebus_bus_pm_restore_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore_noirq) - ret = drv->pm->restore_noirq(dev); - } - - return ret; -} - -#else /* !CONFIG_HIBERNATE_CALLBACKS */ - -#define ibmebus_bus_pm_freeze NULL -#define ibmebus_bus_pm_thaw NULL -#define ibmebus_bus_pm_poweroff NULL -#define ibmebus_bus_pm_restore NULL -#define ibmebus_bus_pm_freeze_noirq NULL -#define ibmebus_bus_pm_thaw_noirq NULL -#define ibmebus_bus_pm_poweroff_noirq NULL -#define ibmebus_bus_pm_restore_noirq NULL - -#endif /* !CONFIG_HIBERNATE_CALLBACKS */ - -static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { - .prepare = ibmebus_bus_pm_prepare, - .complete = ibmebus_bus_pm_complete, - .suspend = ibmebus_bus_pm_suspend, - .resume = ibmebus_bus_pm_resume, - .freeze = ibmebus_bus_pm_freeze, - .thaw = ibmebus_bus_pm_thaw, - .poweroff = ibmebus_bus_pm_poweroff, - .restore = ibmebus_bus_pm_restore, - .suspend_noirq = ibmebus_bus_pm_suspend_noirq, - .resume_noirq = ibmebus_bus_pm_resume_noirq, - .freeze_noirq = ibmebus_bus_pm_freeze_noirq, - .thaw_noirq = ibmebus_bus_pm_thaw_noirq, - .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, - .restore_noirq = ibmebus_bus_pm_restore_noirq, -}; - -#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) - -#else /* !CONFIG_PM_SLEEP */ - -#define IBMEBUS_BUS_PM_OPS_PTR NULL - -#endif /* !CONFIG_PM_SLEEP */ - struct bus_type ibmebus_bus_type = { .name = "ibmebus", .uevent = of_device_uevent_modalias, @@ -729,7 +432,6 @@ struct bus_type ibmebus_bus_type = { .remove = ibmebus_bus_device_remove, .shutdown = ibmebus_bus_device_shutdown, .dev_attrs = ibmebus_bus_device_attrs, - .pm = IBMEBUS_BUS_PM_OPS_PTR, }; EXPORT_SYMBOL(ibmebus_bus_type); -- cgit From d0563a1297e234ed37f6b51c2e9321accebd1839 Mon Sep 17 00:00:00 2001 From: Pan Xinhui Date: Wed, 27 Apr 2016 17:16:45 +0800 Subject: powerpc: Implement {cmp}xchg for u8 and u16 Implement xchg{u8,u16}{local,relaxed}, and cmpxchg{u8,u16}{,local,acquire,relaxed}. It works on all ppc. 
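The trick used by the XCHG_GEN()/CMPXCHG_GEN() macros below is to perform the reservation on the aligned 32-bit word that contains the byte or halfword, and to shift and mask the narrow value into position within that word. A rough portable model of the same idea, written against GCC's __atomic builtins purely for illustration (cmpxchg_u16() here is not the kernel implementation; the real code derives the bit offset endian-aware via BITOFF_CAL() and uses a lwarx/stwcx. loop):

#include <stdint.h>

static uint16_t cmpxchg_u16(uint16_t *p, uint16_t old, uint16_t newval)
{
	uintptr_t addr = (uintptr_t)p;
	uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)3);
	unsigned int shift = (addr & 3) * 8;	/* little-endian; BE mirrors this */
	uint32_t mask = 0xffffu << shift;
	uint32_t prev, repl;

	prev = __atomic_load_n(word, __ATOMIC_RELAXED);
	do {
		/* Stop as soon as the halfword no longer matches 'old'. */
		if ((uint16_t)((prev & mask) >> shift) != old)
			break;
		repl = (prev & ~mask) | ((uint32_t)newval << shift);
	} while (!__atomic_compare_exchange_n(word, &prev, repl, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	/* Either the successfully exchanged value or the mismatching current one. */
	return (uint16_t)((prev & mask) >> shift);
}

A naturally aligned u8 or u16 can never straddle the containing word, which is what makes the word-sized reservation safe.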
remove volatile of first parameter in __cmpxchg_local and __cmpxchg Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Pan Xinhui Acked-by: Boqun Feng Acked-by: Peter Zijlstra (Intel) Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cmpxchg.h | 109 ++++++++++++++++++++++++++++++++++++- 1 file changed, 106 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index 44efe739b6b9..c12f110261b2 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -7,6 +7,71 @@ #include #include +#ifdef __BIG_ENDIAN +#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE) +#else +#define BITOFF_CAL(size, off) (off * BITS_PER_BYTE) +#endif + +#define XCHG_GEN(type, sfx, cl) \ +static inline u32 __xchg_##type##sfx(void *p, u32 val) \ +{ \ + unsigned int prev, prev_mask, tmp, bitoff, off; \ + \ + off = (unsigned long)p % sizeof(u32); \ + bitoff = BITOFF_CAL(sizeof(type), off); \ + p -= off; \ + val <<= bitoff; \ + prev_mask = (u32)(type)-1 << bitoff; \ + \ + __asm__ __volatile__( \ +"1: lwarx %0,0,%3\n" \ +" andc %1,%0,%5\n" \ +" or %1,%1,%4\n" \ + PPC405_ERR77(0,%3) \ +" stwcx. %1,0,%3\n" \ +" bne- 1b\n" \ + : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \ + : "r" (p), "r" (val), "r" (prev_mask) \ + : "cc", cl); \ + \ + return prev >> bitoff; \ +} + +#define CMPXCHG_GEN(type, sfx, br, br2, cl) \ +static inline \ +u32 __cmpxchg_##type##sfx(void *p, u32 old, u32 new) \ +{ \ + unsigned int prev, prev_mask, tmp, bitoff, off; \ + \ + off = (unsigned long)p % sizeof(u32); \ + bitoff = BITOFF_CAL(sizeof(type), off); \ + p -= off; \ + old <<= bitoff; \ + new <<= bitoff; \ + prev_mask = (u32)(type)-1 << bitoff; \ + \ + __asm__ __volatile__( \ + br \ +"1: lwarx %0,0,%3\n" \ +" and %1,%0,%6\n" \ +" cmpw 0,%1,%4\n" \ +" bne- 2f\n" \ +" andc %1,%0,%6\n" \ +" or %1,%1,%5\n" \ + PPC405_ERR77(0,%3) \ +" stwcx. %1,0,%3\n" \ +" bne- 1b\n" \ + br2 \ + "\n" \ +"2:" \ + : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \ + : "r" (p), "r" (old), "r" (new), "r" (prev_mask) \ + : "cc", cl); \ + \ + return prev >> bitoff; \ +} + /* * Atomic exchange * @@ -14,6 +79,11 @@ * the previous value stored there. */ +XCHG_GEN(u8, _local, "memory"); +XCHG_GEN(u8, _relaxed, "cc"); +XCHG_GEN(u16, _local, "memory"); +XCHG_GEN(u16, _relaxed, "cc"); + static __always_inline unsigned long __xchg_u32_local(volatile void *p, unsigned long val) { @@ -85,9 +155,13 @@ __xchg_u64_relaxed(u64 *p, unsigned long val) #endif static __always_inline unsigned long -__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) +__xchg_local(void *ptr, unsigned long x, unsigned int size) { switch (size) { + case 1: + return __xchg_u8_local(ptr, x); + case 2: + return __xchg_u16_local(ptr, x); case 4: return __xchg_u32_local(ptr, x); #ifdef CONFIG_PPC64 @@ -103,6 +177,10 @@ static __always_inline unsigned long __xchg_relaxed(void *ptr, unsigned long x, unsigned int size) { switch (size) { + case 1: + return __xchg_u8_relaxed(ptr, x); + case 2: + return __xchg_u16_relaxed(ptr, x); case 4: return __xchg_u32_relaxed(ptr, x); #ifdef CONFIG_PPC64 @@ -131,6 +209,15 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size) * and return the old value of *p. 
*/ +CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory"); +CMPXCHG_GEN(u8, _local, , , "memory"); +CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory"); +CMPXCHG_GEN(u8, _relaxed, , , "cc"); +CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory"); +CMPXCHG_GEN(u16, _local, , , "memory"); +CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory"); +CMPXCHG_GEN(u16, _relaxed, , , "cc"); + static __always_inline unsigned long __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) { @@ -312,10 +399,14 @@ __cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new) #endif static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, +__cmpxchg(void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { + case 1: + return __cmpxchg_u8(ptr, old, new); + case 2: + return __cmpxchg_u16(ptr, old, new); case 4: return __cmpxchg_u32(ptr, old, new); #ifdef CONFIG_PPC64 @@ -328,10 +419,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, } static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, +__cmpxchg_local(void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { + case 1: + return __cmpxchg_u8_local(ptr, old, new); + case 2: + return __cmpxchg_u16_local(ptr, old, new); case 4: return __cmpxchg_u32_local(ptr, old, new); #ifdef CONFIG_PPC64 @@ -348,6 +443,10 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { + case 1: + return __cmpxchg_u8_relaxed(ptr, old, new); + case 2: + return __cmpxchg_u16_relaxed(ptr, old, new); case 4: return __cmpxchg_u32_relaxed(ptr, old, new); #ifdef CONFIG_PPC64 @@ -364,6 +463,10 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { + case 1: + return __cmpxchg_u8_acquire(ptr, old, new); + case 2: + return __cmpxchg_u16_acquire(ptr, old, new); case 4: return __cmpxchg_u32_acquire(ptr, old, new); #ifdef CONFIG_PPC64 -- cgit From 6533b7c16ee5712041b4e324100550e02a9a5dda Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 22 Nov 2016 11:49:30 +0100 Subject: powerpc: Initial stack protector (-fstack-protector) support Partialy copied from commit c743f38013aef ("ARM: initial stack protector (-fstack-protector) support") This is the very basic stuff without the changing canary upon task switch yet. Just the Kconfig option and a constant canary value initialized at boot time. 
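For orientation: gcc's -fstack-protector only requires the kernel to provide the guard variable it reads (__stack_chk_guard on powerpc, set up by this patch) and the failure handler __stack_chk_fail(), which generic kernel code already supplies; the compiler then inserts the prologue/epilogue checks itself. A stand-alone model of what those generated checks amount to (illustrative names with a _model suffix, not kernel code):

#include <stdio.h>
#include <stdlib.h>

unsigned long stack_chk_guard_model = 0xdeadbeefUL;	/* kernel: __stack_chk_guard */

static void stack_chk_fail_model(void)			/* kernel: __stack_chk_fail() */
{
	fprintf(stderr, "stack corruption detected\n");
	abort();
}

int protected_function(void)
{
	unsigned long canary = stack_chk_guard_model;	/* prologue: stash a copy */
	char buf[64];

	buf[0] = 0;		/* ...body that might overflow buf... */

	if (canary != stack_chk_guard_model)		/* epilogue: verify the copy */
		stack_chk_fail_model();
	return buf[0];
}

With a single global guard, every task shares the same canary; the follow-up patch below addresses that by reloading __stack_chk_guard from the task struct at context switch (on non-SMP).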
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/stackprotector.h | 40 +++++++++++++++++++++++++++++++ arch/powerpc/kernel/Makefile | 4 ++++ arch/powerpc/kernel/process.c | 6 +++++ 4 files changed, 51 insertions(+) create mode 100644 arch/powerpc/include/asm/stackprotector.h diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5ee3dbeab323..cfc0688a25e9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -163,6 +163,7 @@ config PPC select HAVE_VIRT_CPU_ACCOUNTING select HAVE_ARCH_HARDENED_USERCOPY select HAVE_KERNEL_GZIP + select HAVE_CC_STACKPROTECTOR config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h new file mode 100644 index 000000000000..6720190eabec --- /dev/null +++ b/arch/powerpc/include/asm/stackprotector.h @@ -0,0 +1,40 @@ +/* + * GCC stack protector support. + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on PPC. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H + +#include +#include +#include + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. */ + get_random_bytes(&canary, sizeof(canary)); + canary ^= mftb(); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 26b5a5e02e69..7a9f7164cc2e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -19,6 +19,10 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +# -fstack-protector triggers protection checks in this code, +# but it is being used too early to link to meaningful stack_chk logic. +CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) + ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 5b62e8c36210..9da9a42595ce 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -64,6 +64,12 @@ #include #include +#ifdef CONFIG_CC_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + /* Transactional Memory debug */ #ifdef TM_DEBUG_SW #define TM_DEBUG(x...) 
printk(KERN_INFO x) -- cgit From 902e06eb86cd62753974c249bd1dedae2825b430 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 22 Nov 2016 11:49:32 +0100 Subject: powerpc/32: Change the stack protector canary value per task Partially copied from commit df0698be14c66 ("ARM: stack protector: change the canary value per task") A new random value for the canary is stored in the task struct whenever a new task is forked. This is meant to allow for different canary values per task. On powerpc, GCC expects the canary value to be found in a global variable called __stack_chk_guard. So this variable has to be updated with the value stored in the task struct whenever a task switch occurs. Because the variable GCC expects is global, this cannot work on SMP unfortunately. So, on SMP, the same initial canary value is kept throughout, making this feature a bit less effective although it is still useful. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/asm-offsets.c | 3 +++ arch/powerpc/kernel/entry_32.S | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index caec7bf3b99a..5c860302efec 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -91,6 +91,9 @@ int main(void) DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); #endif +#ifdef CONFIG_CC_STACKPROTECTOR + DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif DEFINE(KSP, offsetof(struct thread_struct, ksp)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3841d749a430..5742dbdbee46 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -674,7 +674,11 @@ BEGIN_FTR_SECTION mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ END_FTR_SECTION_IFSET(CPU_FTR_SPE) #endif /* CONFIG_SPE */ - +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + lwz r0,TSK_STACK_CANARY(r2) + lis r4,__stack_chk_guard@ha + stw r0,__stack_chk_guard@l(r4) +#endif lwz r0,_CCR(r1) mtcrf 0xFF,r0 /* r3-r12 are destroyed -- Cort */ -- cgit From 3382a6220ff3bac886d9d90766f3fe18cf25b468 Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Tue, 22 Nov 2016 21:13:27 +1100 Subject: cxl: Fix coccinelle warnings Fix the following coccinelle warnings: drivers/misc/cxl/debugfs.c:46:0-23: WARNING: fops_io_x64 should be defined with DEFINE_DEBUGFS_ATTRIBUTE drivers/misc/cxl/guest.c:890:5-26: WARNING: Comparison to bool drivers/misc/cxl/irq.c:107:3-23: WARNING: Assignment of bool to 0/1 drivers/misc/cxl/native.c:57:2-3: Unneeded semicolon drivers/misc/cxl/native.c:170:2-3: Unneeded semicolon Signed-off-by: Andrew Donnellan Acked-by: Frederic Barrat Reviewed-by: Matthew R. 
Ochs Acked-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/debugfs.c | 6 ++++-- drivers/misc/cxl/guest.c | 2 +- drivers/misc/cxl/irq.c | 2 +- drivers/misc/cxl/native.c | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index ec7b8a017439..9c06ac8fa5ac 100644 --- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c @@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val) out_be64((u64 __iomem *)data, val); return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, + "0x%016llx\n"); static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, struct dentry *parent, u64 __iomem *value) { - return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64); + return debugfs_create_file_unsafe(name, mode, parent, + (void __force *)value, &fops_io_x64); } void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir) diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 3e102cd6ed91..e04bc4ddfd74 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work) afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE) return; - if (afu_guest->handle_err == true) + if (afu_guest->handle_err) schedule_delayed_work(&afu_guest->work_err, msecs_to_jiffies(3000)); } diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index dec60f58a767..1a402bbed687 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i } else { spin_lock(&ctx->lock); ctx->afu_err = irq_info->afu_err; - ctx->pending_afu_err = 1; + ctx->pending_afu_err = true; spin_unlock(&ctx->lock); wake_up_all(&ctx->wq); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index c336350ede94..c1216d31796c 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -54,7 +54,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, AFU_Cntl | command); cpu_relax(); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - }; + } if (AFU_Cntl & CXL_AFU_Cntl_An_RA) { /* @@ -167,7 +167,7 @@ int cxl_psl_purge(struct cxl_afu *afu) cpu_relax(); } PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); - }; + } end = local_clock(); pr_devel("PSL purged in %lld ns\n", end - start); -- cgit From 18058822768f4665e2132ddd1cb413a9033edf09 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 16 Nov 2016 16:40:30 +0200 Subject: soc/qman: Handle endianness of h/w descriptors The hardware descriptors have big endian (BE) format. Provide proper endianness handling for the remaining descriptor fields, to ensure they are correctly accessed by non-BE CPUs too. 
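The conversion pattern used throughout the diff is the standard kernel one: declare descriptor fields with the sparse-checked __be16/__be32 types and convert with cpu_to_be*()/be*_to_cpu() at every CPU-side access. A minimal stand-alone illustration with a hypothetical descriptor (not one of the qman structures):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_desc {
	__be32 fqid;			/* stored big-endian by the hardware */
	__be16 seqnum;
};

static inline void example_fill(struct example_desc *d, u32 fqid, u16 seq)
{
	d->fqid	  = cpu_to_be32(fqid);	/* CPU value -> descriptor */
	d->seqnum = cpu_to_be16(seq);
}

static inline u32 example_read_fqid(const struct example_desc *d)
{
	return be32_to_cpu(d->fqid);	/* descriptor -> CPU value */
}

On big-endian CPUs these helpers compile away, and on little-endian ones they byte-swap; running sparse (make C=1) flags any place where a __be32 field is still used as a plain integer.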
Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman.c | 65 ++++++++++++++++++--------------- drivers/soc/fsl/qbman/qman_priv.h | 10 ++--- drivers/soc/fsl/qbman/qman_test_api.c | 4 +- drivers/soc/fsl/qbman/qman_test_stash.c | 5 ++- include/soc/fsl/qman.h | 48 ++++++++++++------------ 5 files changed, 70 insertions(+), 62 deletions(-) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index acee99da71a7..6f509f68085e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */ struct qm_eqcr_entry { u8 _ncw_verb; /* writes to this are non-coherent */ u8 dca; - u16 seqnum; + __be16 seqnum; u8 __reserved[4]; - u32 fqid; /* 24-bit */ - u32 tag; + __be32 fqid; /* 24-bit */ + __be32 tag; struct qm_fd fd; u8 __reserved3[32]; } __packed; @@ -187,7 +187,7 @@ struct qm_mr { struct qm_mcc_fq { u8 _ncw_verb; u8 __reserved1[3]; - u32 fqid; /* 24-bit */ + __be32 fqid; /* 24-bit */ u8 __reserved2[56]; } __packed; @@ -470,7 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) { DPAA_ASSERT(eqcr->busy); - DPAA_ASSERT(!(eqcr->cursor->fqid & ~QM_FQID_MASK)); + DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); DPAA_ASSERT(eqcr->available >= 1); } @@ -1395,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work) break; case QM_MR_VERB_FQPN: /* Parked */ - fq = tag_to_fq(msg->fq.context_b); + fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, msg); @@ -1409,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work) } } else { /* Its a software ERN */ - fq = tag_to_fq(msg->ern.tag); + fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); fq->cb.ern(p, fq, msg); } num++; @@ -1521,7 +1521,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, clear_vdqcr(p, fq); } else { /* SDQCR: context_b points to the FQ */ - fq = tag_to_fq(dq->context_b); + fq = tag_to_fq(be32_to_cpu(dq->context_b)); /* Now let the callback do its stuff */ res = fq->cb.dqrr(p, fq, dq); /* @@ -1738,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) return -EINVAL; #endif - if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { /* And can't be set at the same time as TDTHRESH */ - if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) return -EINVAL; } /* Issue an INITFQ_[PARKED|SCHED] management command */ @@ -1764,14 +1764,16 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { dma_addr_t phys_fq; - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; - mcc->initfq.fqd.context_b = fq_to_tag(fq); + mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB); + mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); /* * and the physical address - NB, if the user wasn't trying to * set CONTEXTA, clear the stashing settings. 
*/ - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_CONTEXTA); memset(&mcc->initfq.fqd.context_a, 0, sizeof(mcc->initfq.fqd.context_a)); } else { @@ -1791,8 +1793,10 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) if (flags & QMAN_INITFQ_FLAG_LOCAL) { int wq = 0; - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_DESTWQ); wq = 4; } qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); @@ -1811,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) goto out; } if (opts) { - if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { - if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) { + if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE) fq_set(fq, QMAN_FQ_STATE_CGR_EN); else fq_clear(fq, QMAN_FQ_STATE_CGR_EN); } - if (opts->we_mask & QM_INITFQ_WE_CGID) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID) fq->cgr_groupid = opts->fqd.cgid; } fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? @@ -1937,7 +1941,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) msg.verb = QM_MR_VERB_FQRNI; msg.fq.fqs = mcr->alterfq.fqs; qm_fqid_set(&msg.fq, fq->fqid); - msg.fq.context_b = fq_to_tag(fq); + msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); fq->cb.fqs(p, fq, &msg); } } else if (res == QM_MCR_RESULT_PENDING) { @@ -2206,7 +2210,7 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) goto out; qm_fqid_set(eq, fq->fqid); - eq->tag = fq_to_tag(fq); + eq->tag = cpu_to_be32(fq_to_tag(fq)); eq->fd = *fd; qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); @@ -2253,17 +2257,18 @@ out: static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) { if (qman_ip_rev >= QMAN_REV30) - cgr->cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT | pi; + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi | + QM_CGR_TARG_UDP_CTRL_WRITE_BIT); else - cgr->cscn_targ = val | QM_CGR_TARG_PORTAL(pi); + cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi)); } static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) { if (qman_ip_rev >= QMAN_REV30) - cgr->cscn_targ_upd_ctrl = pi; + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi); else - cgr->cscn_targ = val & ~QM_CGR_TARG_PORTAL(pi); + cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi)); } static u8 qman_cgr_cpus[CGR_NUM]; @@ -2315,8 +2320,8 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, goto out; qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), - cgr_state.cgr.cscn_targ); - local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + be32_to_cpu(cgr_state.cgr.cscn_targ)); + local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG); /* send init if flags indicate so */ if (flags & QMAN_CGR_FLAG_USE_INIT) @@ -2383,9 +2388,9 @@ int qman_delete_cgr(struct qman_cgr *cgr) goto release_lock; } - local_opts.we_mask = QM_CGR_WE_CSCN_TARG; + local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG); qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), - cgr_state.cgr.cscn_targ); + be32_to_cpu(cgr_state.cgr.cscn_targ)); ret = qm_modify_cgr(cgr, 0, &local_opts); if (ret) @@ -2835,7 +2840,7 @@ static int cgr_cleanup(u32 cgrid) err = qman_query_fq(&fq, &fqd); if (WARN_ON(err)) return err; - if ((fqd.fq_ctrl & 
QM_FQCTRL_CGE) && + if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && fqd.cgid == cgrid) { pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", cgrid, fq.fqid); diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 5606d74c21d4..53685b59718e 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -73,20 +73,20 @@ struct qm_mcr_querycgr { struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[6]; u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ - u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + __be32 i_bcnt_lo; /* low 32-bits of 40-bit */ u8 __reserved3[3]; u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ - u32 a_bcnt_lo; /* low 32-bits of 40-bit */ - u32 cscn_targ_swp[4]; + __be32 a_bcnt_lo; /* low 32-bits of 40-bit */ + __be32 cscn_targ_swp[4]; } __packed; static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) { - return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; + return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo); } static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) { - return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; + return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); } /* "Query FQ Non-Programmable Fields" */ diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c index dba6a80622ed..2895d062cf51 100644 --- a/drivers/soc/fsl/qbman/qman_test_api.c +++ b/drivers/soc/fsl/qbman/qman_test_api.c @@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd) { qm_fd_addr_set64(fd, 0xabdeadbeefLLU); qm_fd_set_contig_big(fd, 0x0000ffff); - fd->cmd = 0xfeedf00d; + fd->cmd = cpu_to_be32(0xfeedf00d); } static void fd_inc(struct qm_fd *fd) @@ -86,7 +86,7 @@ static void fd_inc(struct qm_fd *fd) len--; qm_fd_set_param(fd, fmt, off, len); - fd->cmd++; + fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1); } /* The only part of the 'fd' we can't memcmp() is the ppid */ diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c index f8d25faf4d8a..b9795f39bcc8 100644 --- a/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -406,8 +406,9 @@ static int init_handler(void *h) goto failed; } memset(&opts, 0, sizeof(opts)); - opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; - opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | + QM_INITFQ_WE_CONTEXTA); + opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING); qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, &opts); diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index d01d5a358945..3d4df74a96de 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg) struct qm_dqrr_entry { u8 verb; u8 stat; - u16 seqnum; /* 15-bit */ + __be16 seqnum; /* 15-bit */ u8 tok; u8 __reserved2[3]; - u32 fqid; /* 24-bit */ - u32 context_b; + __be32 fqid; /* 24-bit */ + __be32 context_b; struct qm_fd fd; u8 __reserved4[32]; } __packed; @@ -264,8 +264,8 @@ struct qm_dqrr_entry { /* 'fqid' is a 24-bit field in every h/w descriptor */ #define QM_FQID_MASK GENMASK(23, 0) -#define qm_fqid_set(p, v) ((p)->fqid = ((v) & QM_FQID_MASK)) -#define qm_fqid_get(p) ((p)->fqid & QM_FQID_MASK) +#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK)) +#define qm_fqid_get(p) 
(be32_to_cpu((p)->fqid) & QM_FQID_MASK) /* "ERN Message Response" */ /* "FQ State Change Notification" */ @@ -277,11 +277,11 @@ union qm_mr_entry { struct { u8 verb; u8 dca; - u16 seqnum; + __be16 seqnum; u8 rc; /* Rej Code: 8-bit */ u8 __reserved[3]; - u32 fqid; /* 24-bit */ - u32 tag; + __be32 fqid; /* 24-bit */ + __be32 tag; struct qm_fd fd; u8 __reserved1[32]; } __packed ern; @@ -289,8 +289,8 @@ union qm_mr_entry { u8 verb; u8 fqs; /* Frame Queue Status */ u8 __reserved1[6]; - u32 fqid; /* 24-bit */ - u32 context_b; + __be32 fqid; /* 24-bit */ + __be32 context_b; u8 __reserved2[48]; } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ }; @@ -409,8 +409,8 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) { - fqd->context_a.context_hi = upper_32_bits(addr); - fqd->context_a.context_lo = lower_32_bits(addr); + fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr)); + fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr)); } static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) @@ -525,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd) */ struct qm_cgr_wr_parm { /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */ - u32 word; + __be32 word; }; /* * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding @@ -536,7 +536,7 @@ struct qm_cgr_wr_parm { */ struct qm_cgr_cs_thres { /* _res[13-15], TA[5-12], Tn[0-4] */ - u16 word; + __be16 word; }; /* * This identical structure of CGR fields is present in the "Init/Modify CGR" @@ -553,10 +553,10 @@ struct __qm_mc_cgr { u8 cscn_en; /* boolean, use QM_CGR_EN */ union { struct { - u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */ - u16 cscn_targ_dcp_low; + __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */ + __be16 cscn_targ_dcp_low; }; - u32 cscn_targ; /* use QM_CGR_TARG_* */ + __be32 cscn_targ; /* use QM_CGR_TARG_* */ }; u8 cstd_en; /* boolean, use QM_CGR_EN */ u8 cs; /* boolean, only used in query response */ @@ -572,7 +572,9 @@ struct __qm_mc_cgr { /* Convert CGR thresholds to/from "cs_thres" format */ static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) { - return ((th->word >> 5) & 0xff) << (th->word & 0x1f); + int thres = be16_to_cpu(th->word); + + return ((thres >> 5) & 0xff) << (thres & 0x1f); } static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, @@ -588,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, if (roundup && oddbit) val++; } - th->word = ((val & 0xff) << 5) | (e & 0x1f); + th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f)); return 0; } /* "Initialize FQ" */ struct qm_mcc_initfq { u8 __reserved1[2]; - u16 we_mask; /* Write Enable Mask */ - u32 fqid; /* 24-bit */ - u16 count; /* Initialises 'count+1' FQDs */ + __be16 we_mask; /* Write Enable Mask */ + __be32 fqid; /* 24-bit */ + __be16 count; /* Initialises 'count+1' FQDs */ struct qm_fqd fqd; /* the FQD fields go here */ u8 __reserved2[30]; } __packed; /* "Initialize/Modify CGR" */ struct qm_mcc_initcgr { u8 __reserve1[2]; - u16 we_mask; /* Write Enable Mask */ + __be16 we_mask; /* Write Enable Mask */ struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[2]; u8 cgid; -- cgit From da58b23cb976ab83a80d358102e139afe94f0c56 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 24 Nov 2016 17:08:11 +1100 Subject: powerpc: Fix __cmpxchg() to take a volatile ptr again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit In commit d0563a1297e2 ("powerpc: Implement {cmp}xchg for u8 and u16") we removed the volatile from __cmpxchg(). This is leading to warnings such as: drivers/gpu/drm/drm_lock.c: In function ‘drm_lock_take’: arch/powerpc/include/asm/cmpxchg.h:484:37: warning: passing argument 1 of ‘__cmpxchg’ discards ‘volatile’ qualifier from pointer target (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ There doesn't seem to be consensus across architectures whether the argument is volatile or not, so at least for now put the volatile back. Fixes: d0563a1297e2 ("powerpc: Implement {cmp}xchg for u8 and u16") Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cmpxchg.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index c12f110261b2..fc46b664c49e 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -14,7 +14,7 @@ #endif #define XCHG_GEN(type, sfx, cl) \ -static inline u32 __xchg_##type##sfx(void *p, u32 val) \ +static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \ { \ unsigned int prev, prev_mask, tmp, bitoff, off; \ \ @@ -40,7 +40,7 @@ static inline u32 __xchg_##type##sfx(void *p, u32 val) \ #define CMPXCHG_GEN(type, sfx, br, br2, cl) \ static inline \ -u32 __cmpxchg_##type##sfx(void *p, u32 old, u32 new) \ +u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \ { \ unsigned int prev, prev_mask, tmp, bitoff, off; \ \ @@ -399,7 +399,7 @@ __cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new) #endif static __always_inline unsigned long -__cmpxchg(void *ptr, unsigned long old, unsigned long new, +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { -- cgit From 7184bc2ddb15d9539c701668d6a5be1574efa6a5 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Nov 2016 23:27:38 +0800 Subject: cxl: drop duplicate header sched.h Drop duplicate header sched.h from native.c. Signed-off-by: Geliang Tang Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/native.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index c1216d31796c..09505f432eda 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include -- cgit From ebb242d56bbe14af6ea25cf5e45440df4c26b4b1 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Nov 2016 22:58:56 +0800 Subject: powerpc/of_platform: Use builtin_platform_driver Use builtin_platform_driver() helper to simplify the code. 
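For reference, the helper generates essentially the same initcall boilerplate that this patch removes by hand. Paraphrased (a sketch, not the verbatim definitions from include/linux/platform_device.h and include/linux/device.h), it expands along these lines:

	#define builtin_platform_driver(__platform_driver) \
		builtin_driver(__platform_driver, platform_driver_register)

	/* builtin_driver() then emits roughly: */
	static int __init of_pci_phb_driver_init(void)
	{
		return platform_driver_register(&of_pci_phb_driver);
	}
	device_initcall(of_pci_phb_driver_init);

So the driver is still registered at device_initcall time, just without the open-coded wrapper.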
Signed-off-by: Geliang Tang Acked-by: Russell Currey Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/of_platform.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index b60a67d92ebd..34aeac54f120 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -114,11 +114,6 @@ static struct platform_driver of_pci_phb_driver = { }, }; -static __init int of_pci_phb_init(void) -{ - return platform_driver_register(&of_pci_phb_driver); -} - -device_initcall(of_pci_phb_init); +builtin_platform_driver(of_pci_phb_driver); #endif /* CONFIG_PPC_OF_PLATFORM_PCI */ -- cgit From 56144ec7c93f6f18aa878560074633ac3ad16896 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 6 Nov 2015 13:21:17 +1100 Subject: powerpc/xmon: Add 'dt' command to dump trace buffers There is a nice interface for asking ftrace to dump all its tracing buffers. The only down side for use in xmon is that it uses printk. Depending on circumstances printk may not work when in xmon, but it also may, so add a 'dt' command which dumps the ftrace buffers, and add a note to the help to mentiont that it uses printk. Calling this routine also disables tracing, which is problematic if you return from xmon and expect the system to keep operating normally. So after we do the dump turn tracing back on. Both functions already have nop versions defined for when ftrace is not enabled, so we don't need any extra #ifdefs. Signed-off-by: Michael Ellerman --- arch/powerpc/xmon/xmon.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 760545519a0b..9c0e17cf6886 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -10,6 +10,8 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ + +#include #include #include #include @@ -225,6 +227,7 @@ Commands:\n\ #endif "\ dr dump stream of raw bytes\n\ + dt dump the tracing buffers (uses printk)\n\ e print exception information\n\ f flush cache\n\ la lookup symbol+offset of specified address\n\ @@ -2364,6 +2367,9 @@ dump(void) dump_log_buf(); } else if (c == 'o') { dump_opal_msglog(); + } else if (c == 't') { + ftrace_dump(DUMP_ALL); + tracing_on(); } else if (c == 'r') { scanhex(&ndump); if (ndump == 0) -- cgit From 378f96d3cd442d5cb8e2692d8767a4c036070594 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Tue, 22 Nov 2016 23:36:40 +0530 Subject: powernv: Clear SPRN_PSSCR when a POWER9 CPU comes online Ensure that PSSCR is set to a safe value corresponding to no state-loss each time a POWER9 CPU comes online. Signed-off-by: Gautham R. 
Shenoy Acked-By: Michael Neuling Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/cpu_setup_power.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index bdfc1c67eb38..fe35ef2efc28 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -96,6 +96,7 @@ _GLOBAL(__setup_cpu_power9) mtlr r11 beqlr li r0,0 + mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH @@ -116,6 +117,7 @@ _GLOBAL(__restore_cpu_power9) mtlr r11 beqlr li r0,0 + mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH -- cgit From ee97b6b99f42285d29d439f2e5376e03b2760e09 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Tue, 15 Nov 2016 17:56:14 +1100 Subject: powerpc/mm/radix: Setup AMOR in HV mode to allow key 0 Setup AMOR (Authority Mask Override Register) in HV mode so that the host and guest kernel can in turn setup IAMR. This allows us to enable key 0 in a following patch. Reported-by: Aneesh Kumar K.V Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 8d00ab4222a7..29e502953688 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -312,6 +312,18 @@ static void update_hid_for_radix(void) cpu_relax(); } +static void radix_init_amor(void) +{ + /* + * In HV mode, we init AMOR (Authority Mask Override Register) so that + * the hypervisor and guest can setup IAMR (Instruction Authority Mask + * Register), enable key 0 and set it to 1. + * + * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11) + */ + mtspr(SPRN_AMOR, (3ul << 62)); +} + void __init radix__early_init_mmu(void) { unsigned long lpcr; @@ -368,6 +380,7 @@ void __init radix__early_init_mmu(void) lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); radix_init_partition_table(); + radix_init_amor(); } memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); @@ -387,6 +400,7 @@ void radix__early_init_mmu_secondary(void) mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); + radix_init_amor(); } } -- cgit From 1d18ad026844b60d933c25ae38360f86a8cf41eb Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Tue, 15 Nov 2016 17:56:15 +1100 Subject: powerpc/mm: Detect instruction fetch denied and report ISA 3 allows for prevention of instruction fetch and execution of user mode pages. If such an error occurs, SRR1 bit 35 reports the error. We catch and report the error in do_page_fault(). Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/fault.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 73932f4a386e..a17029aaf939 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -390,6 +390,12 @@ good_area: #endif /* CONFIG_8xx */ if (is_exec) { + /* + * An execution fault + no execute ? + */ + if (regs->msr & SRR1_ISI_N_OR_G) + goto bad_area; + /* * Allow execution from readable areas if the MMU does not * provide separate controls over reading and executing. 
@@ -404,6 +410,7 @@ good_area: (cpu_has_feature(CPU_FTR_NOEXECUTE) || !(vma->vm_flags & (VM_READ | VM_WRITE)))) goto bad_area; + #ifdef CONFIG_PPC_STD_MMU /* * protfault should only happen due to us -- cgit From 3b10d0095a1e2d6ce11f6537e04806ff8aba9cdd Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Tue, 15 Nov 2016 17:56:16 +1100 Subject: powerpc/mm/radix: Prevent kernel execution of user space ISA 3 defines new encoded access authority that allows instruction access prevention in privileged mode and allows normal access to problem state. This patch just enables IAMR (Instruction Authority Mask Register), enabling AMR would require more work. I've tested this with a buggy driver and a simple payload. The payload is specific to the build I've tested. mpe: Also tested with LKDTM: # echo EXEC_USERSPACE > /sys/kernel/debug/provoke-crash/DIRECT lkdtm: Performing direct entry EXEC_USERSPACE lkdtm: attempting ok execution at c0000000005bf560 lkdtm: attempting bad execution at 00003fff8d940000 Unable to handle kernel paging request for instruction fetch Faulting instruction address: 0x3fff8d940000 Oops: Kernel access of bad area, sig: 11 [#1] NIP: 00003fff8d940000 LR: c0000000005bfa58 CTR: 00003fff8d940000 REGS: c0000000f1fcf900 TRAP: 0400 Not tainted (4.9.0-rc5-compiler_gcc-6.2.0-00109-g956dbc06232a) MSR: 9000000010009033 CR: 48002222 XER: 00000000 ... Call Trace: lkdtm_EXEC_USERSPACE+0x104/0x120 (unreliable) lkdtm_do_action+0x3c/0x80 direct_entry+0x100/0x1b0 full_proxy_write+0x94/0x100 __vfs_write+0x3c/0x1b0 vfs_write+0xcc/0x230 SyS_write+0x60/0x110 system_call+0x38/0xfc Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 29e502953688..623a0dc9a9fa 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -324,6 +324,26 @@ static void radix_init_amor(void) mtspr(SPRN_AMOR, (3ul << 62)); } +static void radix_init_iamr(void) +{ + unsigned long iamr; + + /* + * The IAMR should set to 0 on DD1. + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + iamr = 0; + else + iamr = (1ul << 62); + + /* + * Radix always uses key0 of the IAMR to determine if an access is + * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction + * fetch. + */ + mtspr(SPRN_IAMR, iamr); +} + void __init radix__early_init_mmu(void) { unsigned long lpcr; @@ -385,6 +405,7 @@ void __init radix__early_init_mmu(void) memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); + radix_init_iamr(); radix_init_pgtable(); } @@ -402,6 +423,7 @@ void radix__early_init_mmu_secondary(void) __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); radix_init_amor(); } + radix_init_iamr(); } void radix__mmu_cleanup_all(void) -- cgit From f87f253bac3ce4a4eb2a60a1ae604d74e65f9042 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 24 Nov 2016 00:02:07 +1100 Subject: powerpc/64e: Convert cmpi to cmpwi in head_64.S From 80f23935cadb ("powerpc: Convert cmp to cmpd in idle enter sequence"): PowerPC's "cmp" instruction has four operands. Normally people write "cmpw" or "cmpd" for the second cmp operand 0 or 1. But, frequently people forget, and write "cmp" with just three operands. With older binutils this is silently accepted as if this was "cmpw", while often "cmpd" is wanted. With newer binutils GAS will complain about this for 64-bit code. For 32-bit code it still silently assumes "cmpw" is what is meant. 
In this case, cmpwi is called for, so this is just a build fix for new toolchains. Cc: stable@vger.kernel.org # v3.0+ Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/head_64.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 451a8e1cf57b..bdb4612491a9 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -221,9 +221,9 @@ booting_thread_hwid: */ _GLOBAL(book3e_start_thread) LOAD_REG_IMMEDIATE(r5, MSR_KERNEL) - cmpi 0, r3, 0 + cmpwi r3, 0 beq 10f - cmpi 0, r3, 1 + cmpwi r3, 1 beq 11f /* If the thread id is invalid, just exit. */ b 13f @@ -248,9 +248,9 @@ _GLOBAL(book3e_start_thread) * r3 = the thread physical id */ _GLOBAL(book3e_stop_thread) - cmpi 0, r3, 0 + cmpwi r3, 0 beq 10f - cmpi 0, r3, 1 + cmpwi r3, 1 beq 10f /* If the thread id is invalid, just exit. */ b 13f -- cgit From ae88f7b9af17a1267f5dd5b87a44878e676fa570 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 24 Nov 2016 00:02:09 +1100 Subject: powerpc/64e: Don't branch to dot symbols This converts one that was missed by b1576fec7f4d ("powerpc: No need to use dot symbols when branching to a function"). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/exceptions-64e.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 38a1f96430e1..45b453e4d0c8 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -923,10 +923,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x340) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* * An interrupt came in while soft-disabled; We mark paca->irq_happened -- cgit From bee8b3b56d1dfc4075254a61340ee3898a732e63 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:46:58 +0530 Subject: powerpc/mm: Rename hugetlb-radix.h to hugetlb.h We will start moving some book3s specific hugetlb functions there. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hugetlb-radix.h | 29 ---------------------- arch/powerpc/include/asm/book3s/64/hugetlb.h | 29 ++++++++++++++++++++++ arch/powerpc/include/asm/hugetlb.h | 2 +- 3 files changed, 30 insertions(+), 30 deletions(-) delete mode 100644 arch/powerpc/include/asm/book3s/64/hugetlb-radix.h create mode 100644 arch/powerpc/include/asm/book3s/64/hugetlb.h diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h deleted file mode 100644 index c45189aa7476..000000000000 --- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H -#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H -/* - * For radix we want generic code to handle hugetlb. But then if we want - * both hash and radix to be enabled together we need to workaround the - * limitations. 
- */ -void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern unsigned long -radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); - -static inline int hstate_get_psize(struct hstate *hstate) -{ - unsigned long shift; - - shift = huge_page_shift(hstate); - if (shift == mmu_psize_defs[MMU_PAGE_2M].shift) - return MMU_PAGE_2M; - else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) - return MMU_PAGE_1G; - else { - WARN(1, "Wrong huge page shift\n"); - return mmu_virtual_psize; - } -} -#endif diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h new file mode 100644 index 000000000000..499268045306 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -0,0 +1,29 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H +#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H +/* + * For radix we want generic code to handle hugetlb. But then if we want + * both hash and radix to be enabled together we need to workaround the + * limitations. + */ +void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern unsigned long +radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); + +static inline int hstate_get_psize(struct hstate *hstate) +{ + unsigned long shift; + + shift = huge_page_shift(hstate); + if (shift == mmu_psize_defs[MMU_PAGE_2M].shift) + return MMU_PAGE_2M; + else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) + return MMU_PAGE_1G; + else { + WARN(1, "Wrong huge page shift\n"); + return mmu_virtual_psize; + } +} +#endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index c5517f463ec7..c03e0a3dd4d8 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -9,7 +9,7 @@ extern struct kmem_cache *hugepte_cache; #ifdef CONFIG_PPC_BOOK3S_64 -#include +#include /* * This should work for other subarchs too. But right now we use the * new format only for 64bit book3s -- cgit From ccf17c8b5c8465750a6c514be9ef6f5b156b6c67 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:46:59 +0530 Subject: powerpc/mm/hugetlb: Handle hugepage size supported by hash config W.r.t hash page table config, we support 16MB and 16GB as the hugepage size. Update the hstate_get_psize to handle 16M and 16G. 
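For context, a hedged sketch (not taken from the patch) of how the lookup now resolves for the hash sizes:

	unsigned int shift = huge_page_shift(hstate);	/* 24 for a 16M hstate */
	/* mmu_psize_defs[MMU_PAGE_16M].shift is also 24 under hash, so */
	int psize = hstate_get_psize(hstate);		/* returns MMU_PAGE_16M */

and a shift of 34 (16G) likewise resolves to MMU_PAGE_16G, rather than tripping the "Wrong huge page shift" warning and falling back to mmu_virtual_psize.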
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hugetlb.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 499268045306..d9c283f95e05 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -21,6 +21,10 @@ static inline int hstate_get_psize(struct hstate *hstate) return MMU_PAGE_2M; else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift) return MMU_PAGE_1G; + else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift) + return MMU_PAGE_16M; + else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift) + return MMU_PAGE_16G; else { WARN(1, "Wrong huge page shift\n"); return mmu_virtual_psize; -- cgit From 049d567af209b093eefa9f26eae6e15226db3520 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:47:00 +0530 Subject: powerpc/mm: Introduce _PAGE_LARGE software pte bits This patch adds a new software defined pte bit. We use the reserved fields of ISA 3.0 pte definition since we will only be using this on DD1 code paths. We can possibly look at removing this code later. The software bit will be used to differentiate between 64K/4K and 2M ptes. This helps in finding the page size mapping by a pte so that we can do efficient tlb flush. We don't support 1G hugetlb pages yet. So we add a DEBUG WARN_ON to catch wrong usage. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/hugetlb.h | 20 ++++++++++++++++++++ arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ++++++++++ arch/powerpc/include/asm/book3s/64/radix.h | 2 ++ 3 files changed, 32 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index d9c283f95e05..c62f14d0bec1 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -30,4 +30,24 @@ static inline int hstate_get_psize(struct hstate *hstate) return mmu_virtual_psize; } } + +#define arch_make_huge_pte arch_make_huge_pte +static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) +{ + unsigned long page_shift; + + if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) + return entry; + + page_shift = huge_page_shift(hstate_vma(vma)); + /* + * We don't support 1G hugetlb pages yet. + */ + VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift); + if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift) + return __pte(pte_val(entry) | _PAGE_LARGE); + else + return entry; +} #endif diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 9fd77f8794a0..ac66d05a7c01 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -26,6 +26,11 @@ #define _RPAGE_SW1 0x00800 #define _RPAGE_SW2 0x00400 #define _RPAGE_SW3 0x00200 +#define _RPAGE_RSV1 0x1000000000000000UL +#define _RPAGE_RSV2 0x0800000000000000UL +#define _RPAGE_RSV3 0x0400000000000000UL +#define _RPAGE_RSV4 0x0200000000000000UL + #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ #else @@ -33,6 +38,11 @@ #endif #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ +/* + * For P9 DD1 only, we need to track whether the pte's huge. 
+ */ +#define _PAGE_LARGE _RPAGE_RSV1 + #define _PAGE_PTE (1ul << 62) /* distinguishes PTEs from pointers */ #define _PAGE_PRESENT (1ul << 63) /* pte contains a translation */ diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 2a46dea8e1b1..d2c5c064e266 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -243,6 +243,8 @@ static inline int radix__pmd_trans_huge(pmd_t pmd) static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) { + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE); return __pmd(pmd_val(pmd) | _PAGE_PTE); } static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma, -- cgit From 6d3a0379ebdc8e35662343f5359ac4589b79aec2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:47:01 +0530 Subject: powerpc/mm: Add radix__tlb_flush_pte_p9_dd1() Now that we have page size details encoded in pte using software pte bits, use that to find the page size needed for tlb flush. This function should only be used on P9 DD1, so give it a horrible name to make that clear. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 2 ++ arch/powerpc/mm/tlb-radix.c | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index a9e19cb2f7c5..cc7fbde4f53c 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -42,4 +42,6 @@ extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa, unsigned long page_size); extern void radix__flush_tlb_lpid(unsigned long lpid); extern void radix__flush_tlb_all(void); +extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, + unsigned long address); #endif diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index bda8c43be78a..2822a8277f0b 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -424,3 +424,21 @@ void radix__flush_tlb_all(void) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } + +void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, + unsigned long address) +{ + /* + * We track page size in pte only for DD1, So we can + * call this only on DD1. + */ + if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) { + VM_WARN_ON(1); + return; + } + + if (old_pte & _PAGE_LARGE) + radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M); + else + radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize); +} -- cgit From b3603e174fc81598e8b060d5e9aafe19ee6e65cf Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:47:02 +0530 Subject: powerpc/mm: update radix__ptep_set_access_flag to not do full mm tlb flush When we are updating a pte, we just need to flush the tlb mapping that pte. Right now we do a full mm flush because we don't track the page size. 
Now that we have page size details in pte use that to do the optimized flush Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/32/pgtable.h | 3 ++- arch/powerpc/include/asm/book3s/64/pgtable.h | 5 +++-- arch/powerpc/include/asm/book3s/64/radix.h | 11 +++-------- arch/powerpc/include/asm/nohash/32/pgtable.h | 3 ++- arch/powerpc/include/asm/nohash/64/pgtable.h | 3 ++- arch/powerpc/mm/pgtable-book3s64.c | 3 ++- arch/powerpc/mm/pgtable.c | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 6b8b2d57fdc8..dc58980f3ad9 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -224,7 +224,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline void __ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, + unsigned long address) { unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index ac66d05a7c01..bbea0040320a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -578,10 +578,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev) */ static inline void __ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, + unsigned long address) { if (radix_enabled()) - return radix__ptep_set_access_flags(mm, ptep, entry); + return radix__ptep_set_access_flags(mm, ptep, entry, address); return hash__ptep_set_access_flags(ptep, entry); } diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index d2c5c064e266..36f636911ade 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -167,7 +167,8 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm, * function doesn't need to invalidate tlb. 
*/ static inline void radix__ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, + unsigned long address) { unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | @@ -183,13 +184,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm, * new value of pte */ new_pte = old_pte | set; - - /* - * For now let's do heavy pid flush - * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize); - */ - radix__flush_tlb_mm(mm); - + radix__flush_tlb_pte_p9_dd1(old_pte, mm, address); __radix_pte_update(ptep, 0, new_pte); } else __radix_pte_update(ptep, 0, set); diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index c219ef7be53b..65073fbc6707 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -268,7 +268,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline void __ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, + unsigned long address) { unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index e9e540a804fc..53a41b06a7b9 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -289,7 +289,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, * function doesn't need to flush the hash entry */ static inline void __ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, + unsigned long address) { unsigned long bits = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f4f437cbabf1..ebf9782bacf9 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -35,7 +35,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, #endif changed = !pmd_same(*(pmdp), entry); if (changed) { - __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry)); + __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), + pmd_pte(entry), address); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 911fdfb63ec1..cb39c8bd2436 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, if (changed) { if (!is_vm_hugetlb_page(vma)) assert_pte_locked(vma->vm_mm, address); - __ptep_set_access_flags(vma->vm_mm, ptep, entry); + __ptep_set_access_flags(vma->vm_mm, ptep, entry, address); flush_tlb_page(vma, address); } return changed; -- cgit From e58d1cf24309b3b58c7cff7ea1f873e498fdaa39 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:47:03 +0530 Subject: powerpc/mm: update radix__pte_update to not do full mm tlb flush When we are updating a pte, we just need to flush the tlb mapping that pte. Right now we do a full mm flush because we don't track page size. 
Now that we have page size details in pte use that to do the optimized flush Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/radix.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 36f636911ade..f4066cf31b3e 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -145,13 +145,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm, * new value of pte */ new_pte = (old_pte | set) & ~clr; - - /* - * For now let's do heavy pid flush - * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize); - */ - radix__flush_tlb_mm(mm); - + radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr); __radix_pte_update(ptep, 0, new_pte); } else old_pte = __radix_pte_update(ptep, clr, set); -- cgit From d522ae1e49a4f0bcbd0efa0a3afb2b8d52d1fbd6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 28 Nov 2016 11:47:04 +0530 Subject: powerpc/mm: Batch tlb flush when invalidating pte entries This will improve the task exit case, by batching tlb invalidates. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/radix.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index f4066cf31b3e..b4d1302387a3 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -140,13 +140,20 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm, unsigned long new_pte; old_pte = __radix_pte_update(ptep, ~0, 0); - asm volatile("ptesync" : : : "memory"); /* * new value of pte */ new_pte = (old_pte | set) & ~clr; - radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr); - __radix_pte_update(ptep, 0, new_pte); + /* + * If we are trying to clear the pte, we can skip + * the below sequence and batch the tlb flush. The + * tlb flush batching is done by mmu gather code + */ + if (new_pte) { + asm volatile("ptesync" : : : "memory"); + radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr); + __radix_pte_update(ptep, 0, new_pte); + } } else old_pte = __radix_pte_update(ptep, clr, set); asm volatile("ptesync" : : : "memory"); -- cgit From 1196d7aaebf6cdad619310fe2834224c8cc10d50 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 21 Nov 2016 21:14:33 +1100 Subject: powerpc: Stop passing ARCH=ppc64 to boot Makefile Back in 2005 when the ppc/ppc64 merge started, we used to build the kernel code in arch/powerpc but use the boot code from arch/ppc or arch/ppc64 depending on whether we were building for 32 or 64-bit. Originally we called the boot Makefile passing ARCH=$(OLDARCH), where OLDARCH was ppc or ppc64. In commit 20f629549b30 ("powerpc: Make building the boot image work for both 32-bit and 64-bit") (2005-10-11) we split the call for 32/64-bit using an ifeq check, because the two Makefiles took different targets, and explicitly passed ARCH=ppc64 for the 64-bit case and ARCH=ppc for the 32-bit case. Then in commit 94b212c29f68 ("powerpc: Move ppc64 boot wrapper code over to arch/powerpc") (2005-11-16) we moved the boot code into arch/powerpc and dropped the ppc case, but kept passing ARCH=ppc64 to arch/powerpc/boot/Makefile. Since then there have been several more boot targets added, all of which have copied the ARCH=ppc64 setting, such that now we have four targets using it. 
Currently it seems that nothing actually uses the ARCH value, but that's basically just luck, and in particular it prevents us from using the generic cpp_lds_S rule. It's also clearly wrong, ARCH=ppc64 is dead, buried and cremated. Fix it by dropping the setting of ARCH completely, the correct value is exported by the top level Makefile. Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 0ee7be32b112..82ce8c8b63ad 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -276,16 +276,16 @@ zImage: relocs_check endif $(BOOT_TARGETS1): vmlinux - $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) + $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) $(BOOT_TARGETS2): vmlinux - $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) + $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) bootwrapper_install: - $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) + $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) %.dtb: scripts - $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) + $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) # Used to create 'merged defconfigs' # To use it $(call) it with the first argument as the base defconfig -- cgit From 42d0c932b05b06f04ac66439b4b14be5a83db114 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 21 Nov 2016 21:14:34 +1100 Subject: powerpc/boot: All uses of if_changed should depend on FORCE If we're using if_changed then we must depend on FORCE, so that if_changed gets a chance to check if something changed. Signed-off-by: Michael Ellerman --- arch/powerpc/boot/Makefile | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index eae2dc8bc218..7f3faa6255a8 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -356,17 +356,17 @@ $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz # Don't put the ramdisk on the pattern rule; when its missing make will try # the pattern rule with less dependencies that also matches (even with the # hard dependency listed). 
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) +$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) FORCE $(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz) -$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits) +$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits) FORCE $(call if_changed,wrap,$(subst $(obj)/zImage.,,$@)) # dtbImage% - a dtbImage is a zImage with an embedded device tree blob -$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb +$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE $(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb +$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE $(call if_changed,wrap,$*,,$(obj)/$*.dtb) # This cannot be in the root of $(src) as the zImage rule always adds a $(obj) @@ -374,31 +374,31 @@ $(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb $(obj)/vmlinux.strip: vmlinux $(STRIP) -s -R .comment $< -o $@ -$(obj)/uImage: vmlinux $(wrapperbits) +$(obj)/uImage: vmlinux $(wrapperbits) FORCE $(call if_changed,wrap,uboot) -$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb) -$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb) -$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb) -$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) +$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb) # Rule to build device tree blobs -- cgit From f0f7fe1ac35330f077357c787ec5bd8958bf300d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 21 Nov 2016 21:14:35 +1100 Subject: powerpc/boot: Fix rebuild when changing kernel endian Now that we don't set ARCH incorrectly when calling the boot Makefile, we can use the generic cpp_lds_S rule for converting our zImage.lds.S into zImage.lds. The main advantage of using the generic rule is that it correctly uses if_changed, which means we correctly regenerate the linker script when switching endian. Fixing that means we are finally able to build one endian and then rebuild the other endian without requiring to clean between builds. 
Signed-off-by: Michael Ellerman --- arch/powerpc/boot/Makefile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 7f3faa6255a8..1d41d5a9d05e 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -171,10 +171,6 @@ $(addprefix $(obj)/,$(libfdt) $(libfdtheader)): $(obj)/%: $(srctree)/scripts/dtc $(obj)/empty.c: $(Q)touch $@ -$(obj)/zImage.lds: $(obj)/%: $(srctree)/$(src)/%.S - $(CROSS32CC) $(cpp_flags) -E -Wp,-MD,$(depfile) -P -Upowerpc \ - -D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $< - $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S $(Q)cp $< $@ -- cgit From 0ab5171b8971282d7562b77f9b14137a827117fc Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 30 Nov 2016 11:35:36 +1100 Subject: powerpc/mm: Fix no execute fault handling on pre-POWER5 Aneesh/Ben reported that the change to do_page_fault() we made in commit 1d18ad026844 ("powerpc/mm: Detect instruction fetch denied and report") needs to handle the case where CPU_FTR_COHERENT_ICACHE is missing but we have CPU_FTR_NOEXECUTE. In those cases the check added for SRR1_ISI_N_OR_G might trigger a false positive. This patch adds a check for CPU_FTR_COHERENT_ICACHE in addition to the MSR value. Fixes: 1d18ad026844 ("powerpc/mm: Detect instruction fetch denied and report") Reported-by: Aneesh Kumar K.V Acked-by: Benjamin Herrenschmidt Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/mm/fault.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index a17029aaf939..6fd30ac7d14a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -392,8 +392,16 @@ good_area: if (is_exec) { /* * An execution fault + no execute ? + * + * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we + * deliberately create NX mappings, and use the fault to do the + * cache flush. This is usually handled in hash_page_do_lazy_icache() + * but we could end up here if that races with a concurrent PTE + * update. In that case we need to fall through here to the VMA + * check below. */ - if (regs->msr & SRR1_ISI_N_OR_G) + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && + (regs->msr & SRR1_ISI_N_OR_G)) goto bad_area; /* -- cgit From 60fe3910bb029e3671ce7ac080a7acb7e032b9e0 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:47 +1100 Subject: kexec_file: Allow arch-specific memory walking for kexec_add_buffer Allow architectures to specify a different memory walking function for kexec_add_buffer. x86 uses iomem to track reserved memory ranges, but PowerPC uses the memblock subsystem. Signed-off-by: Thiago Jung Bauermann Acked-by: Dave Young Acked-by: Balbir Singh Signed-off-by: Michael Ellerman --- include/linux/kexec.h | 29 ++++++++++++++++++++++++++++- kernel/kexec_file.c | 30 ++++++++++++++++++++++-------- kernel/kexec_internal.h | 16 ---------------- 3 files changed, 50 insertions(+), 25 deletions(-) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 406c33dcae13..5e320ddaaa82 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -148,7 +148,34 @@ struct kexec_file_ops { kexec_verify_sig_t *verify_sig; #endif }; -#endif + +/** + * struct kexec_buf - parameters for finding a place for a buffer in memory + * @image: kexec image in which memory to search. + * @buffer: Contents which will be copied to the allocated memory. + * @bufsz: Size of @buffer. 
+ * @mem: On return will have address of the buffer in memory. + * @memsz: Size for the buffer in memory. + * @buf_align: Minimum alignment needed. + * @buf_min: The buffer can't be placed below this address. + * @buf_max: The buffer can't be placed above this address. + * @top_down: Allocate from top of memory. + */ +struct kexec_buf { + struct kimage *image; + char *buffer; + unsigned long bufsz; + unsigned long mem; + unsigned long memsz; + unsigned long buf_align; + unsigned long buf_min; + unsigned long buf_max; + bool top_down; +}; + +int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(u64, u64, void *)); +#endif /* CONFIG_KEXEC_FILE */ struct kimage { kimage_entry_t head; diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 037c321c5618..f865674bff51 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -428,6 +428,27 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg) return locate_mem_hole_bottom_up(start, end, kbuf); } +/** + * arch_kexec_walk_mem - call func(data) on free memory regions + * @kbuf: Context info for the search. Also passed to @func. + * @func: Function to call for each memory region. + * + * Return: The memory walk will stop when func returns a non-zero value + * and that value will be returned. If all free regions are visited without + * func returning non-zero, then zero will be returned. + */ +int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(u64, u64, void *)) +{ + if (kbuf->image->type == KEXEC_TYPE_CRASH) + return walk_iomem_res_desc(crashk_res.desc, + IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY, + crashk_res.start, crashk_res.end, + kbuf, func); + else + return walk_system_ram_res(0, ULONG_MAX, kbuf, func); +} + /* * Helper function for placing a buffer in a kexec segment. This assumes * that kexec_mutex is held. @@ -474,14 +495,7 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, kbuf->top_down = top_down; /* Walk the RAM ranges and allocate a suitable range for the buffer */ - if (image->type == KEXEC_TYPE_CRASH) - ret = walk_iomem_res_desc(crashk_res.desc, - IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY, - crashk_res.start, crashk_res.end, kbuf, - locate_mem_hole_callback); - else - ret = walk_system_ram_res(0, -1, kbuf, - locate_mem_hole_callback); + ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback); if (ret != 1) { /* A suitable memory range could not be found for buffer */ return -EADDRNOTAVAIL; diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 0a52315d9c62..4cef7e4706b0 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h @@ -20,22 +20,6 @@ struct kexec_sha_region { unsigned long len; }; -/* - * Keeps track of buffer parameters as provided by caller for requesting - * memory placement of buffer. - */ -struct kexec_buf { - struct kimage *image; - char *buffer; - unsigned long bufsz; - unsigned long mem; - unsigned long memsz; - unsigned long buf_align; - unsigned long buf_min; - unsigned long buf_max; - bool top_down; /* allocate from top of memory hole */ -}; - void kimage_file_post_load_cleanup(struct kimage *image); #else /* CONFIG_KEXEC_FILE */ static inline void kimage_file_post_load_cleanup(struct kimage *image) { } -- cgit From ec2b9bfaac44ea889625a6b9473d33898b10d35f Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:48 +1100 Subject: kexec_file: Change kexec_add_buffer to take kexec_buf as argument. This is done to simplify the kexec_add_buffer argument list. 
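For illustration, a hypothetical caller under the new interface (mirroring the conversions below rather than quoting any one of them) looks roughly like:

	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };
	int ret;

	kbuf.buffer = payload;		/* 'payload'/'payload_len' are placeholder names */
	kbuf.bufsz = payload_len;
	kbuf.memsz = payload_len;
	kbuf.buf_align = PAGE_SIZE;
	ret = kexec_add_buffer(&kbuf);
	if (!ret)
		load_addr = kbuf.mem;	/* where the segment was placed */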
Adapt all callers to set up a kexec_buf to pass to kexec_add_buffer. In addition, change the type of kexec_buf.buffer from char * to void *. There is no particular reason for it to be a char *, and the change allows us to get rid of 3 existing casts to char * in the code. Signed-off-by: Thiago Jung Bauermann Acked-by: Dave Young Acked-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/x86/kernel/crash.c | 37 ++++++++-------- arch/x86/kernel/kexec-bzimage64.c | 48 +++++++++++---------- include/linux/kexec.h | 8 +--- kernel/kexec_file.c | 88 ++++++++++++++++++--------------------- 4 files changed, 87 insertions(+), 94 deletions(-) diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 650830e39e3a..3741461c63a0 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -631,9 +631,9 @@ static int determine_backup_region(u64 start, u64 end, void *arg) int crash_load_segments(struct kimage *image) { - unsigned long src_start, src_sz, elf_sz; - void *elf_addr; int ret; + struct kexec_buf kbuf = { .image = image, .buf_min = 0, + .buf_max = ULONG_MAX, .top_down = false }; /* * Determine and load a segment for backup area. First 640K RAM @@ -647,43 +647,44 @@ int crash_load_segments(struct kimage *image) if (ret < 0) return ret; - src_start = image->arch.backup_src_start; - src_sz = image->arch.backup_src_sz; - /* Add backup segment. */ - if (src_sz) { + if (image->arch.backup_src_sz) { + kbuf.buffer = &crash_zero_bytes; + kbuf.bufsz = sizeof(crash_zero_bytes); + kbuf.memsz = image->arch.backup_src_sz; + kbuf.buf_align = PAGE_SIZE; /* * Ideally there is no source for backup segment. This is * copied in purgatory after crash. Just add a zero filled * segment for now to make sure checksum logic works fine. */ - ret = kexec_add_buffer(image, (char *)&crash_zero_bytes, - sizeof(crash_zero_bytes), src_sz, - PAGE_SIZE, 0, -1, 0, - &image->arch.backup_load_addr); + ret = kexec_add_buffer(&kbuf); if (ret) return ret; + image->arch.backup_load_addr = kbuf.mem; pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n", - image->arch.backup_load_addr, src_start, src_sz); + image->arch.backup_load_addr, + image->arch.backup_src_start, kbuf.memsz); } /* Prepare elf headers and add a segment */ - ret = prepare_elf_headers(image, &elf_addr, &elf_sz); + ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz); if (ret) return ret; - image->arch.elf_headers = elf_addr; - image->arch.elf_headers_sz = elf_sz; + image->arch.elf_headers = kbuf.buffer; + image->arch.elf_headers_sz = kbuf.bufsz; - ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz, - ELF_CORE_HEADER_ALIGN, 0, -1, 0, - &image->arch.elf_load_addr); + kbuf.memsz = kbuf.bufsz; + kbuf.buf_align = ELF_CORE_HEADER_ALIGN; + ret = kexec_add_buffer(&kbuf); if (ret) { vfree((void *)image->arch.elf_headers); return ret; } + image->arch.elf_load_addr = kbuf.mem; pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->arch.elf_load_addr, elf_sz, elf_sz); + image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz); return ret; } diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 3407b148c240..d0a814a9d96a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -331,17 +331,17 @@ static void *bzImage64_load(struct kimage *image, char *kernel, struct setup_header *header; int setup_sects, kern16_size, ret = 0; - unsigned long setup_header_size, params_cmdline_sz, params_misc_sz; + unsigned long setup_header_size, 
params_cmdline_sz; struct boot_params *params; unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr; unsigned long purgatory_load_addr; - unsigned long kernel_bufsz, kernel_memsz, kernel_align; - char *kernel_buf; struct bzimage64_data *ldata; struct kexec_entry64_regs regs64; void *stack; unsigned int setup_hdr_offset = offsetof(struct boot_params, hdr); unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset; + struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX, + .top_down = true }; header = (struct setup_header *)(kernel + setup_hdr_offset); setup_sects = header->setup_sects; @@ -402,11 +402,11 @@ static void *bzImage64_load(struct kimage *image, char *kernel, params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - params_misc_sz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + efi_map_sz + sizeof(struct setup_data) + sizeof(struct efi_setup_data); - params = kzalloc(params_misc_sz, GFP_KERNEL); + params = kzalloc(kbuf.bufsz, GFP_KERNEL); if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; @@ -418,37 +418,41 @@ static void *bzImage64_load(struct kimage *image, char *kernel, /* Is there a limit on setup header size? */ memcpy(¶ms->hdr, (kernel + setup_hdr_offset), setup_header_size); - ret = kexec_add_buffer(image, (char *)params, params_misc_sz, - params_misc_sz, 16, MIN_BOOTPARAM_ADDR, - ULONG_MAX, 1, &bootparam_load_addr); + kbuf.buffer = params; + kbuf.memsz = kbuf.bufsz; + kbuf.buf_align = 16; + kbuf.buf_min = MIN_BOOTPARAM_ADDR; + ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; + bootparam_load_addr = kbuf.mem; pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - bootparam_load_addr, params_misc_sz, params_misc_sz); + bootparam_load_addr, kbuf.bufsz, kbuf.bufsz); /* Load kernel */ - kernel_buf = kernel + kern16_size; - kernel_bufsz = kernel_len - kern16_size; - kernel_memsz = PAGE_ALIGN(header->init_size); - kernel_align = header->kernel_alignment; - - ret = kexec_add_buffer(image, kernel_buf, - kernel_bufsz, kernel_memsz, kernel_align, - MIN_KERNEL_LOAD_ADDR, ULONG_MAX, 1, - &kernel_load_addr); + kbuf.buffer = kernel + kern16_size; + kbuf.bufsz = kernel_len - kern16_size; + kbuf.memsz = PAGE_ALIGN(header->init_size); + kbuf.buf_align = header->kernel_alignment; + kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; + ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; + kernel_load_addr = kbuf.mem; pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - kernel_load_addr, kernel_memsz, kernel_memsz); + kernel_load_addr, kbuf.bufsz, kbuf.memsz); /* Load initrd high */ if (initrd) { - ret = kexec_add_buffer(image, initrd, initrd_len, initrd_len, - PAGE_SIZE, MIN_INITRD_LOAD_ADDR, - ULONG_MAX, 1, &initrd_load_addr); + kbuf.buffer = initrd; + kbuf.bufsz = kbuf.memsz = initrd_len; + kbuf.buf_align = PAGE_SIZE; + kbuf.buf_min = MIN_INITRD_LOAD_ADDR; + ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; + initrd_load_addr = kbuf.mem; pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", initrd_load_addr, initrd_len, initrd_len); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 5e320ddaaa82..437ef1b47428 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -163,7 +163,7 @@ struct kexec_file_ops { */ struct kexec_buf { struct kimage *image; - char *buffer; + void *buffer; unsigned long bufsz; unsigned long mem; unsigned long 
memsz; @@ -175,6 +175,7 @@ struct kexec_buf { int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *)); +extern int kexec_add_buffer(struct kexec_buf *kbuf); #endif /* CONFIG_KEXEC_FILE */ struct kimage { @@ -239,11 +240,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry, struct kexec_segment __user *segments, unsigned long flags); extern int kernel_kexec(void); -extern int kexec_add_buffer(struct kimage *image, char *buffer, - unsigned long bufsz, unsigned long memsz, - unsigned long buf_align, unsigned long buf_min, - unsigned long buf_max, bool top_down, - unsigned long *load_addr); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); extern int kexec_load_purgatory(struct kimage *image, unsigned long min, diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index f865674bff51..efd2c094af7e 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -449,25 +449,27 @@ int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, return walk_system_ram_res(0, ULONG_MAX, kbuf, func); } -/* - * Helper function for placing a buffer in a kexec segment. This assumes - * that kexec_mutex is held. +/** + * kexec_add_buffer - place a buffer in a kexec segment + * @kbuf: Buffer contents and memory parameters. + * + * This function assumes that kexec_mutex is held. + * On successful return, @kbuf->mem will have the physical address of + * the buffer in memory. + * + * Return: 0 on success, negative errno on error. */ -int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, - unsigned long memsz, unsigned long buf_align, - unsigned long buf_min, unsigned long buf_max, - bool top_down, unsigned long *load_addr) +int kexec_add_buffer(struct kexec_buf *kbuf) { struct kexec_segment *ksegment; - struct kexec_buf buf, *kbuf; int ret; /* Currently adding segment this way is allowed only in file mode */ - if (!image->file_mode) + if (!kbuf->image->file_mode) return -EINVAL; - if (image->nr_segments >= KEXEC_SEGMENT_MAX) + if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX) return -EINVAL; /* @@ -477,22 +479,14 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, * logic goes through list of segments to make sure there are * no destination overlaps. */ - if (!list_empty(&image->control_pages)) { + if (!list_empty(&kbuf->image->control_pages)) { WARN_ON(1); return -EINVAL; } - memset(&buf, 0, sizeof(struct kexec_buf)); - kbuf = &buf; - kbuf->image = image; - kbuf->buffer = buffer; - kbuf->bufsz = bufsz; - - kbuf->memsz = ALIGN(memsz, PAGE_SIZE); - kbuf->buf_align = max(buf_align, PAGE_SIZE); - kbuf->buf_min = buf_min; - kbuf->buf_max = buf_max; - kbuf->top_down = top_down; + /* Ensure minimum alignment needed for segments. 
*/ + kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE); + kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE); /* Walk the RAM ranges and allocate a suitable range for the buffer */ ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback); @@ -502,13 +496,12 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, } /* Found a suitable memory range */ - ksegment = &image->segment[image->nr_segments]; + ksegment = &kbuf->image->segment[kbuf->image->nr_segments]; ksegment->kbuf = kbuf->buffer; ksegment->bufsz = kbuf->bufsz; ksegment->mem = kbuf->mem; ksegment->memsz = kbuf->memsz; - image->nr_segments++; - *load_addr = ksegment->mem; + kbuf->image->nr_segments++; return 0; } @@ -630,13 +623,15 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min, unsigned long max, int top_down) { struct purgatory_info *pi = &image->purgatory_info; - unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad; - unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset; + unsigned long align, bss_align, bss_sz, bss_pad; + unsigned long entry, load_addr, curr_load_addr, bss_addr, offset; unsigned char *buf_addr, *src; int i, ret = 0, entry_sidx = -1; const Elf_Shdr *sechdrs_c; Elf_Shdr *sechdrs = NULL; - void *purgatory_buf = NULL; + struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1, + .buf_min = min, .buf_max = max, + .top_down = top_down }; /* * sechdrs_c points to section headers in purgatory and are read @@ -702,9 +697,7 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min, } /* Determine how much memory is needed to load relocatable object. */ - buf_align = 1; bss_align = 1; - buf_sz = 0; bss_sz = 0; for (i = 0; i < pi->ehdr->e_shnum; i++) { @@ -713,10 +706,10 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min, align = sechdrs[i].sh_addralign; if (sechdrs[i].sh_type != SHT_NOBITS) { - if (buf_align < align) - buf_align = align; - buf_sz = ALIGN(buf_sz, align); - buf_sz += sechdrs[i].sh_size; + if (kbuf.buf_align < align) + kbuf.buf_align = align; + kbuf.bufsz = ALIGN(kbuf.bufsz, align); + kbuf.bufsz += sechdrs[i].sh_size; } else { /* bss section */ if (bss_align < align) @@ -728,32 +721,31 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min, /* Determine the bss padding required to align bss properly */ bss_pad = 0; - if (buf_sz & (bss_align - 1)) - bss_pad = bss_align - (buf_sz & (bss_align - 1)); + if (kbuf.bufsz & (bss_align - 1)) + bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1)); - memsz = buf_sz + bss_pad + bss_sz; + kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz; /* Allocate buffer for purgatory */ - purgatory_buf = vzalloc(buf_sz); - if (!purgatory_buf) { + kbuf.buffer = vzalloc(kbuf.bufsz); + if (!kbuf.buffer) { ret = -ENOMEM; goto out; } - if (buf_align < bss_align) - buf_align = bss_align; + if (kbuf.buf_align < bss_align) + kbuf.buf_align = bss_align; /* Add buffer to segment list */ - ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz, - buf_align, min, max, top_down, - &pi->purgatory_load_addr); + ret = kexec_add_buffer(&kbuf); if (ret) goto out; + pi->purgatory_load_addr = kbuf.mem; /* Load SHF_ALLOC sections */ - buf_addr = purgatory_buf; + buf_addr = kbuf.buffer; load_addr = curr_load_addr = pi->purgatory_load_addr; - bss_addr = load_addr + buf_sz + bss_pad; + bss_addr = load_addr + kbuf.bufsz + bss_pad; for (i = 0; i < pi->ehdr->e_shnum; i++) { if (!(sechdrs[i].sh_flags & SHF_ALLOC)) @@ -799,11 +791,11 @@ static int 
__kexec_load_purgatory(struct kimage *image, unsigned long min, * Used later to identify which section is purgatory and skip it * from checksumming. */ - pi->purgatory_buf = purgatory_buf; + pi->purgatory_buf = kbuf.buffer; return ret; out: vfree(sechdrs); - vfree(purgatory_buf); + vfree(kbuf.buffer); return ret; } -- cgit From e2e806f9e437b46a3fc8f3174a225c73f2e38c3d Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:49 +1100 Subject: kexec_file: Factor out kexec_locate_mem_hole from kexec_add_buffer. kexec_locate_mem_hole will be used by the PowerPC kexec_file_load implementation to find free memory for the purgatory stack. Signed-off-by: Thiago Jung Bauermann Acked-by: Dave Young Signed-off-by: Michael Ellerman --- include/linux/kexec.h | 1 + kernel/kexec_file.c | 25 ++++++++++++++++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 437ef1b47428..a33f63351f86 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -176,6 +176,7 @@ struct kexec_buf { int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *)); extern int kexec_add_buffer(struct kexec_buf *kbuf); +int kexec_locate_mem_hole(struct kexec_buf *kbuf); #endif /* CONFIG_KEXEC_FILE */ struct kimage { diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index efd2c094af7e..0c2df7f73792 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -449,6 +449,23 @@ int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, return walk_system_ram_res(0, ULONG_MAX, kbuf, func); } +/** + * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. + */ +int kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + int ret; + + ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback); + + return ret == 1 ? 0 : -EADDRNOTAVAIL; +} + /** * kexec_add_buffer - place a buffer in a kexec segment * @kbuf: Buffer contents and memory parameters. @@ -489,11 +506,9 @@ int kexec_add_buffer(struct kexec_buf *kbuf) kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE); /* Walk the RAM ranges and allocate a suitable range for the buffer */ - ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback); - if (ret != 1) { - /* A suitable memory range could not be found for buffer */ - return -EADDRNOTAVAIL; - } + ret = kexec_locate_mem_hole(kbuf); + if (ret) + return ret; /* Found a suitable memory range */ ksegment = &kbuf->image->segment[kbuf->image->nr_segments]; -- cgit From da6658859b9c734fee36570f3a7d51764c6c3838 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:50 +1100 Subject: powerpc: Change places using CONFIG_KEXEC to use CONFIG_KEXEC_CORE instead. Commit 2965faa5e03d ("kexec: split kexec_load syscall from kexec core code") introduced CONFIG_KEXEC_CORE so that CONFIG_KEXEC means whether the kexec_load system call should be compiled-in and CONFIG_KEXEC_FILE means whether the kexec_file_load system call should be compiled-in. These options can be set independently from each other. Since until now powerpc only supported kexec_load, CONFIG_KEXEC and CONFIG_KEXEC_CORE were synonyms. That is not the case anymore, so we need to make a distinction. 
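To make the resulting convention concrete, here is a hypothetical fragment (not taken from any single file in this series) showing how the guards end up being used; example_kexec_cpu_down is an invented name:

	#ifdef CONFIG_KEXEC_CORE
	/* Needed by both kexec_load() and kexec_file_load(), e.g. crash
	 * shutdown hooks and the machine_kexec() entry points. */
	void example_kexec_cpu_down(int crash_shutdown, int secondary);
	#endif /* CONFIG_KEXEC_CORE */

	#ifdef CONFIG_KEXEC
	/* Only code reachable solely through the old kexec_load()
	 * syscall keeps using CONFIG_KEXEC. */
	#endif /* CONFIG_KEXEC */
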
Almost all places where CONFIG_KEXEC was being used should be using CONFIG_KEXEC_CORE instead, since kexec_file_load also needs that code compiled in. Signed-off-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/debug.h | 2 +- arch/powerpc/include/asm/kexec.h | 6 +++--- arch/powerpc/include/asm/machdep.h | 4 ++-- arch/powerpc/include/asm/smp.h | 2 +- arch/powerpc/kernel/Makefile | 4 ++-- arch/powerpc/kernel/head_64.S | 2 +- arch/powerpc/kernel/misc_32.S | 2 +- arch/powerpc/kernel/misc_64.S | 6 +++--- arch/powerpc/kernel/prom.c | 2 +- arch/powerpc/kernel/setup_64.c | 4 ++-- arch/powerpc/kernel/smp.c | 6 +++--- arch/powerpc/kernel/traps.c | 2 +- arch/powerpc/platforms/85xx/corenet_generic.c | 2 +- arch/powerpc/platforms/85xx/smp.c | 8 ++++---- arch/powerpc/platforms/cell/spu_base.c | 2 +- arch/powerpc/platforms/powernv/setup.c | 6 +++--- arch/powerpc/platforms/ps3/setup.c | 4 ++-- arch/powerpc/platforms/pseries/Makefile | 2 +- arch/powerpc/platforms/pseries/setup.c | 4 ++-- 20 files changed, 36 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index cfc0688a25e9..a063c7235fa9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -508,7 +508,7 @@ config CRASH_DUMP config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC + depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE help A robust mechanism to get reliable kernel crash dump with assistance from firmware. This approach does not use kexec, diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index a954e4975049..86308f177f2d 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -10,7 +10,7 @@ struct pt_regs; extern struct dentry *powerpc_debugfs_root; -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) extern int (*__debugger)(struct pt_regs *regs); extern int (*__debugger_ipi)(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index a46f5f45570c..eca2f975bf44 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -53,7 +53,7 @@ typedef void (*crash_shutdown_t)(void); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * This function is responsible for capturing register states if coming @@ -91,7 +91,7 @@ static inline bool kdump_in_progress(void) return crashing_cpu >= 0; } -#else /* !CONFIG_KEXEC */ +#else /* !CONFIG_KEXEC_CORE */ static inline void crash_kexec_secondary(struct pt_regs *regs) { } static inline int overlaps_crashkernel(unsigned long start, unsigned long size) @@ -116,7 +116,7 @@ static inline bool kdump_in_progress(void) return false; } -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ #endif /* ! __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_KEXEC_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index e02cbc6a6c70..5011b69107a7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -183,7 +183,7 @@ struct machdep_calls { */ void (*machine_shutdown)(void); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE void (*kexec_cpu_down)(int crash_shutdown, int secondary); /* Called to do what every setup is needed on image and the @@ -198,7 +198,7 @@ struct machdep_calls { * no return. 
*/ void (*machine_kexec)(struct kimage *image); -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ #ifdef CONFIG_SUSPEND /* These are called to disable and enable, respectively, IRQs when diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 0d02c11dc331..32db16d2e7ad 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -176,7 +176,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) #endif /* !CONFIG_SMP */ #endif /* !CONFIG_PPC64 */ -#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC)) +#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)) extern void smp_release_cpus(void); #else static inline void smp_release_cpus(void) { }; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 7a9f7164cc2e..68a6a3c8322e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -109,7 +109,7 @@ pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \ pci-common.o pci_of_scan.o obj-$(CONFIG_PCI_MSI) += msi.o -obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ +obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \ machine_kexec_$(BITS).o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o @@ -130,7 +130,7 @@ obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC32) += $(obj32-y) -ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),) +ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE),) obj-y += ppc_save_regs.o endif diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index bdb4612491a9..1dc5eae2ced3 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -160,7 +160,7 @@ __secondary_hold: cmpdi 0,r12,0 beq 100b -#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) +#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) #ifdef CONFIG_PPC_BOOK3E tovirt(r12,r12) #endif diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 93cf7a5846a6..1863324c6a3c 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -614,7 +614,7 @@ _GLOBAL(start_secondary_resume) _GLOBAL(__main) blr -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * Must be relocatable PIC code callable as a C function. 
*/ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 4f178671f230..32be2a844947 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -478,7 +478,7 @@ _GLOBAL(kexec_wait) addi r5,r5,kexec_flag-1b 99: HMT_LOW -#ifdef CONFIG_KEXEC /* use no memory without kexec */ +#ifdef CONFIG_KEXEC_CORE /* use no memory without kexec */ lwz r4,0(r5) cmpwi 0,r4,0 beq 99b @@ -503,7 +503,7 @@ kexec_flag: .long 0 -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_PPC_BOOK3E /* * BOOK3E has no real MMU mode, so we have to setup the initial TLB @@ -716,4 +716,4 @@ _GLOBAL(kexec_sequence) mtlr 4 li r5,0 blr /* image->start(physid, image->start, 0); */ -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index a7b87b6b4ef4..f5d399e46193 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -428,7 +428,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node, tce_alloc_end = *lprop; #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); if (lprop) crashk_res.start = *lprop; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 7ac8e6eaab5b..c3e129080c31 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -346,7 +346,7 @@ void early_setup_secondary(void) #endif /* CONFIG_SMP */ -#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) +#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) static bool use_spinloop(void) { if (!IS_ENABLED(CONFIG_PPC_BOOK3E)) @@ -391,7 +391,7 @@ void smp_release_cpus(void) DBG(" <- smp_release_cpus()\n"); } -#endif /* CONFIG_SMP || CONFIG_KEXEC */ +#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */ /* * Initialize some remaining members of the ppc64_caches and systemcfg diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 9c6f3fd58059..893bd7f79be6 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -193,7 +193,7 @@ int smp_request_message_ipi(int virq, int msg) if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) { return -EINVAL; } -#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC) +#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE) if (msg == PPC_MSG_DEBUGGER_BREAK) { return 1; } @@ -325,7 +325,7 @@ void tick_broadcast(const struct cpumask *mask) } #endif -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) void smp_send_debugger_break(void) { int cpu; @@ -340,7 +340,7 @@ void smp_send_debugger_break(void) } #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { crash_ipi_function_ptr = crash_ipi_callback; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index b922c6f4a2fe..5faaed2f2144 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -66,7 +66,7 @@ #include #include -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) int (*__debugger)(struct pt_regs *regs) __read_mostly; int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 1179115a4b5c..3803b0addf65 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ 
b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -220,7 +220,7 @@ define_machine(corenet_generic) { * * Likewise, problems have been seen with kexec when coreint is enabled. */ -#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC) +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE) .get_irq = mpic_get_irq, #else .get_irq = mpic_get_coreint_irq, diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index fe9f19e5e935..a83a6d26090d 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -349,13 +349,13 @@ struct smp_ops_t smp_85xx_ops = { .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, #endif -#if defined(CONFIG_KEXEC) && !defined(CONFIG_PPC64) +#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64) .give_timebase = smp_generic_give_timebase, .take_timebase = smp_generic_take_timebase, #endif }; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_PPC32 atomic_t kexec_down_cpus = ATOMIC_INIT(0); @@ -458,7 +458,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image) default_machine_kexec(image); } -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ static void smp_85xx_basic_setup(int cpu_nr) { @@ -512,7 +512,7 @@ void __init mpc85xx_smp_init(void) #endif smp_ops = &smp_85xx_ops; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down; ppc_md.machine_kexec = mpc85xx_smp_machine_kexec; #endif diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index e84d8fbc2e21..96c2b8a40630 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -676,7 +676,7 @@ static ssize_t spu_stat_show(struct device *dev, static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE struct crash_spu_info { struct spu *spu; diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index efe8b6bb168b..d50c7d99baaf 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -174,7 +174,7 @@ static void pnv_shutdown(void) opal_shutdown(); } -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE static void pnv_kexec_wait_secondaries_down(void) { int my_cpu, i, notified = -1; @@ -245,7 +245,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE); } } -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE static unsigned long pnv_memory_block_size(void) @@ -311,7 +311,7 @@ define_machine(powernv) { .machine_shutdown = pnv_shutdown, .power_save = NULL, .calibrate_decr = generic_calibrate_decr, -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .kexec_cpu_down = pnv_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 3a487e7f4a5e..6244bc849469 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -250,7 +250,7 @@ static int __init ps3_probe(void) return 1; } -#if defined(CONFIG_KEXEC) +#if defined(CONFIG_KEXEC_CORE) static void ps3_kexec_cpu_down(int crash_shutdown, int secondary) { int cpu = smp_processor_id(); @@ -276,7 +276,7 @@ define_machine(ps3) { .progress = ps3_progress, .restart = ps3_restart, .halt = ps3_halt, -#if defined(CONFIG_KEXEC) +#if defined(CONFIG_KEXEC_CORE) .kexec_cpu_down = ps3_kexec_cpu_down, #endif }; diff --git 
a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 942fe116a8ba..8f4ba089e802 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -8,7 +8,7 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \ pci.o pci_dlpar.o eeh_pseries.o msi.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCANLOG) += scanlog.o -obj-$(CONFIG_KEXEC) += kexec.o +obj-$(CONFIG_KEXEC_CORE) += kexec.o obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 97aa3f332f24..7736352f7279 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -367,7 +367,7 @@ void pseries_disable_reloc_on_exc(void) } EXPORT_SYMBOL(pseries_disable_reloc_on_exc); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE static void pSeries_machine_kexec(struct kimage *image) { if (firmware_has_feature(FW_FEATURE_SET_MODE)) @@ -725,7 +725,7 @@ define_machine(pseries) { .progress = rtas_progress, .system_reset_exception = pSeries_system_reset_exception, .machine_check_exception = pSeries_machine_check_exception, -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .machine_kexec = pSeries_machine_kexec, .kexec_cpu_down = pseries_kexec_cpu_down, #endif -- cgit From a0458284f0625ade5eff2118bab89b2d4bbacc3e Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:51 +1100 Subject: powerpc: Add support code for kexec_file_load() This patch adds the support code needed for implementing kexec_file_load() on powerpc. This consists of functions to load the ELF kernel, either big or little endian, and set up the purgatory environment which switches from the first kernel to the second kernel. None of this code is built yet, as it depends on CONFIG_KEXEC_FILE which we have not yet defined. Although we could define CONFIG_KEXEC_FILE in this patch, we'd then have a window in history where the kconfig symbol is present but the syscall is not, which would be awkward.
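For orientation, the loaders added here place each segment through the struct kexec_buf interface that the earlier patches in this series converted kexec_add_buffer() to use. A minimal sketch of that calling pattern (illustrative only; payload, payload_len and load_addr are invented names):

	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = payload,		/* hypothetical source buffer */
		.bufsz     = payload_len,
		.memsz     = payload_len,
		.buf_align = PAGE_SIZE,
		.buf_min   = 0,
		.buf_max   = ppc64_rma_size,	/* powerpc loaders stay below the RMA */
		.top_down  = false,
	};

	ret = kexec_add_buffer(&kbuf);
	if (!ret)
		load_addr = kbuf.mem;	/* physical address chosen for the segment */
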
Signed-off-by: Josh Sklar Signed-off-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kexec.h | 10 + arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/kexec_elf_64.c | 663 ++++++++++++++++++++++++++++ arch/powerpc/kernel/machine_kexec_file_64.c | 338 ++++++++++++++ 4 files changed, 1012 insertions(+) create mode 100644 arch/powerpc/kernel/kexec_elf_64.c create mode 100644 arch/powerpc/kernel/machine_kexec_file_64.c diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index eca2f975bf44..6c3b71502fbc 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -91,6 +91,16 @@ static inline bool kdump_in_progress(void) return crashing_cpu >= 0; } +#ifdef CONFIG_KEXEC_FILE +extern struct kexec_file_ops kexec_elf64_ops; + +int setup_purgatory(struct kimage *image, const void *slave_code, + const void *fdt, unsigned long kernel_load_addr, + unsigned long fdt_load_addr); +int setup_new_fdt(void *fdt, unsigned long initrd_load_addr, + unsigned long initrd_len, const char *cmdline); +#endif /* CONFIG_KEXEC_FILE */ + #else /* !CONFIG_KEXEC_CORE */ static inline void crash_kexec_secondary(struct pt_regs *regs) { } diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 68a6a3c8322e..a3a6047fd395 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -111,6 +111,7 @@ obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \ obj-$(CONFIG_PCI_MSI) += msi.o obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \ machine_kexec_$(BITS).o +obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c new file mode 100644 index 000000000000..6acffd34a70f --- /dev/null +++ b/arch/powerpc/kernel/kexec_elf_64.c @@ -0,0 +1,663 @@ +/* + * Load ELF vmlinux file for the kexec_file_load syscall. + * + * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) + * Copyright (C) 2004 IBM Corp. + * Copyright (C) 2005 R Sharada (sharada@in.ibm.com) + * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com) + * Copyright (C) 2016 IBM Corporation + * + * Based on kexec-tools' kexec-elf-exec.c and kexec-elf-ppc64.c. + * Heavily modified for the kernel by + * Thiago Jung Bauermann . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "kexec_elf: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define PURGATORY_STACK_SIZE (16 * 1024) + +#define elf_addr_to_cpu elf64_to_cpu + +#ifndef Elf_Rel +#define Elf_Rel Elf64_Rel +#endif /* Elf_Rel */ + +struct elf_info { + /* + * Where the ELF binary contents are kept. + * Memory managed by the user of the struct. 
+ */ + const char *buffer; + + const struct elfhdr *ehdr; + const struct elf_phdr *proghdrs; + struct elf_shdr *sechdrs; +}; + +static inline bool elf_is_elf_file(const struct elfhdr *ehdr) +{ + return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0; +} + +static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value) +{ + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + value = le64_to_cpu(value); + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + value = be64_to_cpu(value); + + return value; +} + +static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value) +{ + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + value = le16_to_cpu(value); + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + value = be16_to_cpu(value); + + return value; +} + +static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value) +{ + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + value = le32_to_cpu(value); + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + value = be32_to_cpu(value); + + return value; +} + +/** + * elf_is_ehdr_sane - check that it is safe to use the ELF header + * @buf_len: size of the buffer in which the ELF file is loaded. + */ +static bool elf_is_ehdr_sane(const struct elfhdr *ehdr, size_t buf_len) +{ + if (ehdr->e_phnum > 0 && ehdr->e_phentsize != sizeof(struct elf_phdr)) { + pr_debug("Bad program header size.\n"); + return false; + } else if (ehdr->e_shnum > 0 && + ehdr->e_shentsize != sizeof(struct elf_shdr)) { + pr_debug("Bad section header size.\n"); + return false; + } else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT || + ehdr->e_version != EV_CURRENT) { + pr_debug("Unknown ELF version.\n"); + return false; + } + + if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { + size_t phdr_size; + + /* + * e_phnum is at most 65535 so calculating the size of the + * program header cannot overflow. + */ + phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; + + /* Sanity check the program header table location. */ + if (ehdr->e_phoff + phdr_size < ehdr->e_phoff) { + pr_debug("Program headers at invalid location.\n"); + return false; + } else if (ehdr->e_phoff + phdr_size > buf_len) { + pr_debug("Program headers truncated.\n"); + return false; + } + } + + if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) { + size_t shdr_size; + + /* + * e_shnum is at most 65536 so calculating + * the size of the section header cannot overflow. + */ + shdr_size = sizeof(struct elf_shdr) * ehdr->e_shnum; + + /* Sanity check the section header table location. 
*/ + if (ehdr->e_shoff + shdr_size < ehdr->e_shoff) { + pr_debug("Section headers at invalid location.\n"); + return false; + } else if (ehdr->e_shoff + shdr_size > buf_len) { + pr_debug("Section headers truncated.\n"); + return false; + } + } + + return true; +} + +static int elf_read_ehdr(const char *buf, size_t len, struct elfhdr *ehdr) +{ + struct elfhdr *buf_ehdr; + + if (len < sizeof(*buf_ehdr)) { + pr_debug("Buffer is too small to hold ELF header.\n"); + return -ENOEXEC; + } + + memset(ehdr, 0, sizeof(*ehdr)); + memcpy(ehdr->e_ident, buf, sizeof(ehdr->e_ident)); + if (!elf_is_elf_file(ehdr)) { + pr_debug("No ELF header magic.\n"); + return -ENOEXEC; + } + + if (ehdr->e_ident[EI_CLASS] != ELF_CLASS) { + pr_debug("Not a supported ELF class.\n"); + return -ENOEXEC; + } else if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && + ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { + pr_debug("Not a supported ELF data format.\n"); + return -ENOEXEC; + } + + buf_ehdr = (struct elfhdr *) buf; + if (elf16_to_cpu(ehdr, buf_ehdr->e_ehsize) != sizeof(*buf_ehdr)) { + pr_debug("Bad ELF header size.\n"); + return -ENOEXEC; + } + + ehdr->e_type = elf16_to_cpu(ehdr, buf_ehdr->e_type); + ehdr->e_machine = elf16_to_cpu(ehdr, buf_ehdr->e_machine); + ehdr->e_version = elf32_to_cpu(ehdr, buf_ehdr->e_version); + ehdr->e_entry = elf_addr_to_cpu(ehdr, buf_ehdr->e_entry); + ehdr->e_phoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_phoff); + ehdr->e_shoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_shoff); + ehdr->e_flags = elf32_to_cpu(ehdr, buf_ehdr->e_flags); + ehdr->e_phentsize = elf16_to_cpu(ehdr, buf_ehdr->e_phentsize); + ehdr->e_phnum = elf16_to_cpu(ehdr, buf_ehdr->e_phnum); + ehdr->e_shentsize = elf16_to_cpu(ehdr, buf_ehdr->e_shentsize); + ehdr->e_shnum = elf16_to_cpu(ehdr, buf_ehdr->e_shnum); + ehdr->e_shstrndx = elf16_to_cpu(ehdr, buf_ehdr->e_shstrndx); + + return elf_is_ehdr_sane(ehdr, len) ? 0 : -ENOEXEC; +} + +/** + * elf_is_phdr_sane - check that it is safe to use the program header + * @buf_len: size of the buffer in which the ELF file is loaded. + */ +static bool elf_is_phdr_sane(const struct elf_phdr *phdr, size_t buf_len) +{ + + if (phdr->p_offset + phdr->p_filesz < phdr->p_offset) { + pr_debug("ELF segment location wraps around.\n"); + return false; + } else if (phdr->p_offset + phdr->p_filesz > buf_len) { + pr_debug("ELF segment not in file.\n"); + return false; + } else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) { + pr_debug("ELF segment address wraps around.\n"); + return false; + } + + return true; +} + +static int elf_read_phdr(const char *buf, size_t len, struct elf_info *elf_info, + int idx) +{ + /* Override the const in proghdrs, we are the ones doing the loading. */ + struct elf_phdr *phdr = (struct elf_phdr *) &elf_info->proghdrs[idx]; + const char *pbuf; + struct elf_phdr *buf_phdr; + + pbuf = buf + elf_info->ehdr->e_phoff + (idx * sizeof(*buf_phdr)); + buf_phdr = (struct elf_phdr *) pbuf; + + phdr->p_type = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_type); + phdr->p_offset = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_offset); + phdr->p_paddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_paddr); + phdr->p_vaddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_vaddr); + phdr->p_flags = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_flags); + + /* + * The following fields have a type equivalent to Elf_Addr + * both in 32 bit and 64 bit ELF. 
+ */ + phdr->p_filesz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_filesz); + phdr->p_memsz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_memsz); + phdr->p_align = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_align); + + return elf_is_phdr_sane(phdr, len) ? 0 : -ENOEXEC; +} + +/** + * elf_read_phdrs - read the program headers from the buffer + * + * This function assumes that the program header table was checked for sanity. + * Use elf_is_ehdr_sane() if it wasn't. + */ +static int elf_read_phdrs(const char *buf, size_t len, + struct elf_info *elf_info) +{ + size_t phdr_size, i; + const struct elfhdr *ehdr = elf_info->ehdr; + + /* + * e_phnum is at most 65535 so calculating the size of the + * program header cannot overflow. + */ + phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum; + + elf_info->proghdrs = kzalloc(phdr_size, GFP_KERNEL); + if (!elf_info->proghdrs) + return -ENOMEM; + + for (i = 0; i < ehdr->e_phnum; i++) { + int ret; + + ret = elf_read_phdr(buf, len, elf_info, i); + if (ret) { + kfree(elf_info->proghdrs); + elf_info->proghdrs = NULL; + return ret; + } + } + + return 0; +} + +/** + * elf_is_shdr_sane - check that it is safe to use the section header + * @buf_len: size of the buffer in which the ELF file is loaded. + */ +static bool elf_is_shdr_sane(const struct elf_shdr *shdr, size_t buf_len) +{ + bool size_ok; + + /* SHT_NULL headers have undefined values, so we can't check them. */ + if (shdr->sh_type == SHT_NULL) + return true; + + /* Now verify sh_entsize */ + switch (shdr->sh_type) { + case SHT_SYMTAB: + size_ok = shdr->sh_entsize == sizeof(Elf_Sym); + break; + case SHT_RELA: + size_ok = shdr->sh_entsize == sizeof(Elf_Rela); + break; + case SHT_DYNAMIC: + size_ok = shdr->sh_entsize == sizeof(Elf_Dyn); + break; + case SHT_REL: + size_ok = shdr->sh_entsize == sizeof(Elf_Rel); + break; + case SHT_NOTE: + case SHT_PROGBITS: + case SHT_HASH: + case SHT_NOBITS: + default: + /* + * This is a section whose entsize requirements + * I don't care about. If I don't know about + * the section I can't care about it's entsize + * requirements. + */ + size_ok = true; + break; + } + + if (!size_ok) { + pr_debug("ELF section with wrong entry size.\n"); + return false; + } else if (shdr->sh_addr + shdr->sh_size < shdr->sh_addr) { + pr_debug("ELF section address wraps around.\n"); + return false; + } + + if (shdr->sh_type != SHT_NOBITS) { + if (shdr->sh_offset + shdr->sh_size < shdr->sh_offset) { + pr_debug("ELF section location wraps around.\n"); + return false; + } else if (shdr->sh_offset + shdr->sh_size > buf_len) { + pr_debug("ELF section not in file.\n"); + return false; + } + } + + return true; +} + +static int elf_read_shdr(const char *buf, size_t len, struct elf_info *elf_info, + int idx) +{ + struct elf_shdr *shdr = &elf_info->sechdrs[idx]; + const struct elfhdr *ehdr = elf_info->ehdr; + const char *sbuf; + struct elf_shdr *buf_shdr; + + sbuf = buf + ehdr->e_shoff + idx * sizeof(*buf_shdr); + buf_shdr = (struct elf_shdr *) sbuf; + + shdr->sh_name = elf32_to_cpu(ehdr, buf_shdr->sh_name); + shdr->sh_type = elf32_to_cpu(ehdr, buf_shdr->sh_type); + shdr->sh_addr = elf_addr_to_cpu(ehdr, buf_shdr->sh_addr); + shdr->sh_offset = elf_addr_to_cpu(ehdr, buf_shdr->sh_offset); + shdr->sh_link = elf32_to_cpu(ehdr, buf_shdr->sh_link); + shdr->sh_info = elf32_to_cpu(ehdr, buf_shdr->sh_info); + + /* + * The following fields have a type equivalent to Elf_Addr + * both in 32 bit and 64 bit ELF. 
+ */ + shdr->sh_flags = elf_addr_to_cpu(ehdr, buf_shdr->sh_flags); + shdr->sh_size = elf_addr_to_cpu(ehdr, buf_shdr->sh_size); + shdr->sh_addralign = elf_addr_to_cpu(ehdr, buf_shdr->sh_addralign); + shdr->sh_entsize = elf_addr_to_cpu(ehdr, buf_shdr->sh_entsize); + + return elf_is_shdr_sane(shdr, len) ? 0 : -ENOEXEC; +} + +/** + * elf_read_shdrs - read the section headers from the buffer + * + * This function assumes that the section header table was checked for sanity. + * Use elf_is_ehdr_sane() if it wasn't. + */ +static int elf_read_shdrs(const char *buf, size_t len, + struct elf_info *elf_info) +{ + size_t shdr_size, i; + + /* + * e_shnum is at most 65536 so calculating + * the size of the section header cannot overflow. + */ + shdr_size = sizeof(struct elf_shdr) * elf_info->ehdr->e_shnum; + + elf_info->sechdrs = kzalloc(shdr_size, GFP_KERNEL); + if (!elf_info->sechdrs) + return -ENOMEM; + + for (i = 0; i < elf_info->ehdr->e_shnum; i++) { + int ret; + + ret = elf_read_shdr(buf, len, elf_info, i); + if (ret) { + kfree(elf_info->sechdrs); + elf_info->sechdrs = NULL; + return ret; + } + } + + return 0; +} + +/** + * elf_read_from_buffer - read ELF file and sets up ELF header and ELF info + * @buf: Buffer to read ELF file from. + * @len: Size of @buf. + * @ehdr: Pointer to existing struct which will be populated. + * @elf_info: Pointer to existing struct which will be populated. + * + * This function allows reading ELF files with different byte order than + * the kernel, byte-swapping the fields as needed. + * + * Return: + * On success returns 0, and the caller should call elf_free_info(elf_info) to + * free the memory allocated for the section and program headers. + */ +int elf_read_from_buffer(const char *buf, size_t len, struct elfhdr *ehdr, + struct elf_info *elf_info) +{ + int ret; + + ret = elf_read_ehdr(buf, len, ehdr); + if (ret) + return ret; + + elf_info->buffer = buf; + elf_info->ehdr = ehdr; + if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) { + ret = elf_read_phdrs(buf, len, elf_info); + if (ret) + return ret; + } + if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) { + ret = elf_read_shdrs(buf, len, elf_info); + if (ret) { + kfree(elf_info->proghdrs); + return ret; + } + } + + return 0; +} + +/** + * elf_free_info - free memory allocated by elf_read_from_buffer + */ +void elf_free_info(struct elf_info *elf_info) +{ + kfree(elf_info->proghdrs); + kfree(elf_info->sechdrs); + memset(elf_info, 0, sizeof(*elf_info)); +} +/** + * build_elf_exec_info - read ELF executable and check that we can use it + */ +static int build_elf_exec_info(const char *buf, size_t len, struct elfhdr *ehdr, + struct elf_info *elf_info) +{ + int i; + int ret; + + ret = elf_read_from_buffer(buf, len, ehdr, elf_info); + if (ret) + return ret; + + /* Big endian vmlinux has type ET_DYN. */ + if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) { + pr_err("Not an ELF executable.\n"); + goto error; + } else if (!elf_info->proghdrs) { + pr_err("No ELF program header.\n"); + goto error; + } + + for (i = 0; i < ehdr->e_phnum; i++) { + /* + * Kexec does not support loading interpreters. + * In addition this check keeps us from attempting + * to kexec ordinay executables. 
+ */ + if (elf_info->proghdrs[i].p_type == PT_INTERP) { + pr_err("Requires an ELF interpreter.\n"); + goto error; + } + } + + return 0; +error: + elf_free_info(elf_info); + return -ENOEXEC; +} + +static int elf64_probe(const char *buf, unsigned long len) +{ + struct elfhdr ehdr; + struct elf_info elf_info; + int ret; + + ret = build_elf_exec_info(buf, len, &ehdr, &elf_info); + if (ret) + return ret; + + elf_free_info(&elf_info); + + return elf_check_arch(&ehdr) ? 0 : -ENOEXEC; +} + +/** + * elf_exec_load - load ELF executable image + * @lowest_load_addr: On return, will be the address where the first PT_LOAD + * section will be loaded in memory. + * + * Return: + * 0 on success, negative value on failure. + */ +static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr, + struct elf_info *elf_info, + unsigned long *lowest_load_addr) +{ + unsigned long base = 0, lowest_addr = UINT_MAX; + int ret; + size_t i; + struct kexec_buf kbuf = { .image = image, .buf_max = ppc64_rma_size, + .top_down = false }; + + /* Read in the PT_LOAD segments. */ + for (i = 0; i < ehdr->e_phnum; i++) { + unsigned long load_addr; + size_t size; + const struct elf_phdr *phdr; + + phdr = &elf_info->proghdrs[i]; + if (phdr->p_type != PT_LOAD) + continue; + + size = phdr->p_filesz; + if (size > phdr->p_memsz) + size = phdr->p_memsz; + + kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset; + kbuf.bufsz = size; + kbuf.memsz = phdr->p_memsz; + kbuf.buf_align = phdr->p_align; + kbuf.buf_min = phdr->p_paddr + base; + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out; + load_addr = kbuf.mem; + + if (load_addr < lowest_addr) + lowest_addr = load_addr; + } + + /* Update entry point to reflect new load address. */ + ehdr->e_entry += base; + + *lowest_load_addr = lowest_addr; + ret = 0; + out: + return ret; +} + +static void *elf64_load(struct kimage *image, char *kernel_buf, + unsigned long kernel_len, char *initrd, + unsigned long initrd_len, char *cmdline, + unsigned long cmdline_len) +{ + int ret; + unsigned int fdt_size; + unsigned long kernel_load_addr, purgatory_load_addr; + unsigned long initrd_load_addr = 0, fdt_load_addr; + void *fdt; + const void *slave_code; + struct elfhdr ehdr; + struct elf_info elf_info; + struct kexec_buf kbuf = { .image = image, .buf_min = 0, + .buf_max = ppc64_rma_size }; + + ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info); + if (ret) + goto out; + + ret = elf_exec_load(image, &ehdr, &elf_info, &kernel_load_addr); + if (ret) + goto out; + + pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr); + + ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true, + &purgatory_load_addr); + if (ret) { + pr_err("Loading purgatory failed.\n"); + goto out; + } + + pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr); + + if (initrd != NULL) { + kbuf.buffer = initrd; + kbuf.bufsz = kbuf.memsz = initrd_len; + kbuf.buf_align = PAGE_SIZE; + kbuf.top_down = false; + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out; + initrd_load_addr = kbuf.mem; + + pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr); + } + + fdt_size = fdt_totalsize(initial_boot_params) * 2; + fdt = kmalloc(fdt_size, GFP_KERNEL); + if (!fdt) { + pr_err("Not enough memory for the device tree.\n"); + ret = -ENOMEM; + goto out; + } + ret = fdt_open_into(initial_boot_params, fdt, fdt_size); + if (ret < 0) { + pr_err("Error setting up the new device tree.\n"); + ret = -EINVAL; + goto out; + } + + ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline); + if (ret) + goto out; + + 
fdt_pack(fdt); + + kbuf.buffer = fdt; + kbuf.bufsz = kbuf.memsz = fdt_size; + kbuf.buf_align = PAGE_SIZE; + kbuf.top_down = true; + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out; + fdt_load_addr = kbuf.mem; + + pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr); + + slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset; + ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr, + fdt_load_addr); + if (ret) + pr_err("Error setting up the purgatory.\n"); + +out: + elf_free_info(&elf_info); + + /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */ + return ret ? ERR_PTR(ret) : fdt; +} + +struct kexec_file_ops kexec_elf64_ops = { + .probe = elf64_probe, + .load = elf64_load, +}; diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c new file mode 100644 index 000000000000..7abc8a75ee48 --- /dev/null +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -0,0 +1,338 @@ +/* + * ppc64 code to implement the kexec_file_load syscall + * + * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) + * Copyright (C) 2004 IBM Corp. + * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation + * Copyright (C) 2005 R Sharada (sharada@in.ibm.com) + * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com) + * Copyright (C) 2016 IBM Corporation + * + * Based on kexec-tools' kexec-elf-ppc64.c, fs2dt.c. + * Heavily modified for the kernel by + * Thiago Jung Bauermann . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#define SLAVE_CODE_SIZE 256 + +static struct kexec_file_ops *kexec_file_loaders[] = { + &kexec_elf64_ops, +}; + +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len) +{ + int i, ret = -ENOEXEC; + struct kexec_file_ops *fops; + + /* We don't support crash kernels yet. */ + if (image->type == KEXEC_TYPE_CRASH) + return -ENOTSUPP; + + for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { + fops = kexec_file_loaders[i]; + if (!fops || !fops->probe) + continue; + + ret = fops->probe(buf, buf_len); + if (!ret) { + image->fops = fops; + return ret; + } + } + + return ret; +} + +void *arch_kexec_kernel_image_load(struct kimage *image) +{ + if (!image->fops || !image->fops->load) + return ERR_PTR(-ENOEXEC); + + return image->fops->load(image, image->kernel_buf, + image->kernel_buf_len, image->initrd_buf, + image->initrd_buf_len, image->cmdline_buf, + image->cmdline_buf_len); +} + +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + if (!image->fops || !image->fops->cleanup) + return 0; + + return image->fops->cleanup(image->image_loader_data); +} + +/** + * arch_kexec_walk_mem - call func(data) for each unreserved memory block + * @kbuf: Context info for the search. Also passed to @func. + * @func: Function to call for each memory block. + * + * This function is used by kexec_add_buffer and kexec_locate_mem_hole + * to find unreserved memory to load kexec segments into. + * + * Return: The memory walk will stop when func returns a non-zero value + * and that value will be returned. 
If all free regions are visited without + * func returning non-zero, then zero will be returned. + */ +int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *)) +{ + int ret = 0; + u64 i; + phys_addr_t mstart, mend; + + if (kbuf->top_down) { + for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0, + &mstart, &mend, NULL) { + /* + * In memblock, end points to the first byte after the + * range while in kexec, end points to the last byte + * in the range. + */ + ret = func(mstart, mend - 1, kbuf); + if (ret) + break; + } + } else { + for_each_free_mem_range(i, NUMA_NO_NODE, 0, &mstart, &mend, + NULL) { + /* + * In memblock, end points to the first byte after the + * range while in kexec, end points to the last byte + * in the range. + */ + ret = func(mstart, mend - 1, kbuf); + if (ret) + break; + } + } + + return ret; +} + +/** + * setup_purgatory - initialize the purgatory's global variables + * @image: kexec image. + * @slave_code: Slave code for the purgatory. + * @fdt: Flattened device tree for the next kernel. + * @kernel_load_addr: Address where the kernel is loaded. + * @fdt_load_addr: Address where the flattened device tree is loaded. + * + * Return: 0 on success, or negative errno on error. + */ +int setup_purgatory(struct kimage *image, const void *slave_code, + const void *fdt, unsigned long kernel_load_addr, + unsigned long fdt_load_addr) +{ + unsigned int *slave_code_buf, master_entry; + int ret; + + slave_code_buf = kmalloc(SLAVE_CODE_SIZE, GFP_KERNEL); + if (!slave_code_buf) + return -ENOMEM; + + /* Get the slave code from the new kernel and put it in purgatory. */ + ret = kexec_purgatory_get_set_symbol(image, "purgatory_start", + slave_code_buf, SLAVE_CODE_SIZE, + true); + if (ret) { + kfree(slave_code_buf); + return ret; + } + + master_entry = slave_code_buf[0]; + memcpy(slave_code_buf, slave_code, SLAVE_CODE_SIZE); + slave_code_buf[0] = master_entry; + ret = kexec_purgatory_get_set_symbol(image, "purgatory_start", + slave_code_buf, SLAVE_CODE_SIZE, + false); + kfree(slave_code_buf); + + ret = kexec_purgatory_get_set_symbol(image, "kernel", &kernel_load_addr, + sizeof(kernel_load_addr), false); + if (ret) + return ret; + ret = kexec_purgatory_get_set_symbol(image, "dt_offset", &fdt_load_addr, + sizeof(fdt_load_addr), false); + if (ret) + return ret; + + return 0; +} + +/** + * delete_fdt_mem_rsv - delete memory reservation with given address and size + * + * Return: 0 on success, or negative errno on error. + */ +static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size) +{ + int i, ret, num_rsvs = fdt_num_mem_rsv(fdt); + + for (i = 0; i < num_rsvs; i++) { + uint64_t rsv_start, rsv_size; + + ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size); + if (ret) { + pr_err("Malformed device tree.\n"); + return -EINVAL; + } + + if (rsv_start == start && rsv_size == size) { + ret = fdt_del_mem_rsv(fdt, i); + if (ret) { + pr_err("Error deleting device tree reservation.\n"); + return -EINVAL; + } + + return 0; + } + } + + return -ENOENT; +} + +/* + * setup_new_fdt - modify /chosen and memory reservation for the next kernel + * @fdt: Flattened device tree for the next kernel. + * @initrd_load_addr: Address where the next initrd will be loaded. + * @initrd_len: Size of the next initrd, or 0 if there will be none. + * @cmdline: Command line for the next kernel, or NULL if there will + * be none. + * + * Return: 0 on success, or negative errno on error. 
+ */ +int setup_new_fdt(void *fdt, unsigned long initrd_load_addr, + unsigned long initrd_len, const char *cmdline) +{ + int ret, chosen_node; + const void *prop; + + /* Remove memory reservation for the current device tree. */ + ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params), + fdt_totalsize(initial_boot_params)); + if (ret == 0) + pr_debug("Removed old device tree reservation.\n"); + else if (ret != -ENOENT) + return ret; + + chosen_node = fdt_path_offset(fdt, "/chosen"); + if (chosen_node == -FDT_ERR_NOTFOUND) { + chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), + "chosen"); + if (chosen_node < 0) { + pr_err("Error creating /chosen.\n"); + return -EINVAL; + } + } else if (chosen_node < 0) { + pr_err("Malformed device tree: error reading /chosen.\n"); + return -EINVAL; + } + + /* Did we boot using an initrd? */ + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL); + if (prop) { + uint64_t tmp_start, tmp_end, tmp_size; + + tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop)); + + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL); + if (!prop) { + pr_err("Malformed device tree.\n"); + return -EINVAL; + } + tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop)); + + /* + * kexec reserves exact initrd size, while firmware may + * reserve a multiple of PAGE_SIZE, so check for both. + */ + tmp_size = tmp_end - tmp_start; + ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size); + if (ret == -ENOENT) + ret = delete_fdt_mem_rsv(fdt, tmp_start, + round_up(tmp_size, PAGE_SIZE)); + if (ret == 0) + pr_debug("Removed old initrd reservation.\n"); + else if (ret != -ENOENT) + return ret; + + /* If there's no new initrd, delete the old initrd's info. */ + if (initrd_len == 0) { + ret = fdt_delprop(fdt, chosen_node, + "linux,initrd-start"); + if (ret) { + pr_err("Error deleting linux,initrd-start.\n"); + return -EINVAL; + } + + ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end"); + if (ret) { + pr_err("Error deleting linux,initrd-end.\n"); + return -EINVAL; + } + } + } + + if (initrd_len) { + ret = fdt_setprop_u64(fdt, chosen_node, + "linux,initrd-start", + initrd_load_addr); + if (ret < 0) { + pr_err("Error setting up the new device tree.\n"); + return -EINVAL; + } + + /* initrd-end is the first address after the initrd image. */ + ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end", + initrd_load_addr + initrd_len); + if (ret < 0) { + pr_err("Error setting up the new device tree.\n"); + return -EINVAL; + } + + ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len); + if (ret) { + pr_err("Error reserving initrd memory: %s\n", + fdt_strerror(ret)); + return -EINVAL; + } + } + + if (cmdline != NULL) { + ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); + if (ret < 0) { + pr_err("Error setting up the new device tree.\n"); + return -EINVAL; + } + } else { + ret = fdt_delprop(fdt, chosen_node, "bootargs"); + if (ret && ret != -FDT_ERR_NOTFOUND) { + pr_err("Error deleting bootargs.\n"); + return -EINVAL; + } + } + + ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0); + if (ret) { + pr_err("Error setting up the new device tree.\n"); + return -EINVAL; + } + + return 0; +} -- cgit From 0d97631392c24a9573dafb7b6962cc40b5ef2281 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:52 +1100 Subject: powerpc: Add purgatory for kexec_file_load() implementation. This purgatory implementation is based on the versions from kexec-tools and kexec-lite, with additional changes. 
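As background for the trampoline below, the purgatory's global symbols (kernel, dt_offset, run_at_load) are patched from the kernel side with kexec_purgatory_get_set_symbol(), as setup_purgatory() does in the previous patch. A trimmed, illustrative sketch of that step:

	/* Sketch: write the next kernel's entry point and the new device
	 * tree address into the purgatory before it is checksummed. */
	ret = kexec_purgatory_get_set_symbol(image, "kernel", &kernel_load_addr,
					     sizeof(kernel_load_addr), false);
	if (!ret)
		ret = kexec_purgatory_get_set_symbol(image, "dt_offset",
						     &fdt_load_addr,
						     sizeof(fdt_load_addr), false);
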
Signed-off-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 1 + arch/powerpc/kernel/machine_kexec_64.c | 2 +- arch/powerpc/purgatory/.gitignore | 2 + arch/powerpc/purgatory/Makefile | 15 ++++ arch/powerpc/purgatory/trampoline.S | 128 +++++++++++++++++++++++++++++++++ 5 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/purgatory/.gitignore create mode 100644 arch/powerpc/purgatory/Makefile create mode 100644 arch/powerpc/purgatory/trampoline.S diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 82ce8c8b63ad..31286fa7873c 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -250,6 +250,7 @@ core-y += arch/powerpc/kernel/ \ core-$(CONFIG_XMON) += arch/powerpc/xmon/ core-$(CONFIG_KVM) += arch/powerpc/kvm/ core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/ +core-$(CONFIG_KEXEC_FILE) += arch/powerpc/purgatory/ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index a205fa3d9bf3..5c12e21d0d1a 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -310,7 +310,7 @@ void default_machine_kexec(struct kimage *image) if (!kdump_in_progress()) kexec_prepare_cpus(); - pr_debug("kexec: Starting switchover sequence.\n"); + printk("kexec: Starting switchover sequence.\n"); /* switch to a staticly allocated stack. Based on irq stack code. * We setup preempt_count to avoid using VMX in memcpy. diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore new file mode 100644 index 000000000000..e9e66f178a6d --- /dev/null +++ b/arch/powerpc/purgatory/.gitignore @@ -0,0 +1,2 @@ +kexec-purgatory.c +purgatory.ro diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile new file mode 100644 index 000000000000..ac8793c13348 --- /dev/null +++ b/arch/powerpc/purgatory/Makefile @@ -0,0 +1,15 @@ +targets += trampoline.o purgatory.ro kexec-purgatory.c + +LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined + +$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE + $(call if_changed,ld) + +CMD_BIN2C = $(objtree)/scripts/basic/bin2c +quiet_cmd_bin2c = BIN2C $@ + cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ + +$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE + $(call if_changed,bin2c) + +obj-y += kexec-purgatory.o diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S new file mode 100644 index 000000000000..f9760ccf4032 --- /dev/null +++ b/arch/powerpc/purgatory/trampoline.S @@ -0,0 +1,128 @@ +/* + * kexec trampoline + * + * Based on code taken from kexec-tools and kexec-lite. + * + * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation + * Copyright (C) 2006, Mohan Kumar M, IBM Corporation + * Copyright (C) 2013, Anton Blanchard, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation (version 2 of the License). + */ + +#if defined(__LITTLE_ENDIAN__) +#define STWX_BE stwbrx +#define LWZX_BE lwbrx +#elif defined(__BIG_ENDIAN__) +#define STWX_BE stwx +#define LWZX_BE lwzx +#else +#error no endianness defined! +#endif + + .machine ppc64 + .balign 256 + .globl purgatory_start +purgatory_start: + b master + + /* ABI: possible run_at_load flag at 0x5c */ + .org purgatory_start + 0x5c + .globl run_at_load +run_at_load: + .long 0 + .size run_at_load, . 
- run_at_load + + /* ABI: slaves start at 60 with r3=phys */ + .org purgatory_start + 0x60 +slave: + b . + /* ABI: end of copied region */ + .org purgatory_start + 0x100 + .size purgatory_start, . - purgatory_start + +/* + * The above 0x100 bytes at purgatory_start are replaced with the + * code from the kernel (or next stage) by setup_purgatory(). + */ + +master: + or %r1,%r1,%r1 /* low priority to let other threads catchup */ + isync + mr %r17,%r3 /* save cpu id to r17 */ + mr %r15,%r4 /* save physical address in reg15 */ + + or %r3,%r3,%r3 /* ok now to high priority, lets boot */ + lis %r6,0x1 + mtctr %r6 /* delay a bit for slaves to catch up */ + bdnz . /* before we overwrite 0-100 again */ + + bl 0f /* Work out where we're running */ +0: mflr %r18 + + /* load device-tree address */ + ld %r3, (dt_offset - 0b)(%r18) + mr %r16,%r3 /* save dt address in reg16 */ + li %r4,20 + LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ + cmpwi %r0,%r6,2 /* v2 or later? */ + blt 1f + li %r4,28 + STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ +1: + /* load the kernel address */ + ld %r4,(kernel - 0b)(%r18) + + /* load the run_at_load flag */ + /* possibly patched by kexec */ + ld %r6,(run_at_load - 0b)(%r18) + /* and patch it into the kernel */ + stw %r6,(0x5c)(%r4) + + mr %r3,%r16 /* restore dt address */ + + li %r5,0 /* r5 will be 0 for kernel */ + + mfmsr %r11 + andi. %r10,%r11,1 /* test MSR_LE */ + bne .Little_endian + + mtctr %r4 /* prepare branch to */ + bctr /* start kernel */ + +.Little_endian: + mtsrr0 %r4 /* prepare branch to */ + + clrrdi %r11,%r11,1 /* clear MSR_LE */ + mtsrr1 %r11 + + rfid /* update MSR and start kernel */ + + + .balign 8 + .globl kernel +kernel: + .llong 0x0 + .size kernel, . - kernel + + .balign 8 + .globl dt_offset +dt_offset: + .llong 0x0 + .size dt_offset, . - dt_offset + + + .data + .balign 8 +.globl sha256_digest +sha256_digest: + .skip 32 + .size sha256_digest, . - sha256_digest + + .balign 8 +.globl sha_regions +sha_regions: + .skip 8 * 2 * 16 + .size sha_regions, . - sha_regions -- cgit From 80f60e509a03ff9ff2bdbf9cd1e935c6360b8bd9 Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:53 +1100 Subject: powerpc/kexec: Enable kexec_file_load() syscall Define the Kconfig symbol so that the kexec_file_load() code can be built, and wire up the syscall so that it can be called. Signed-off-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 13 +++++++++++++ arch/powerpc/include/asm/systbl.h | 1 + arch/powerpc/include/asm/unistd.h | 2 +- arch/powerpc/include/uapi/asm/unistd.h | 1 + 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a063c7235fa9..48923c349ccb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -465,6 +465,19 @@ config KEXEC interface is strongly in flux, so no good recommendation can be made. +config KEXEC_FILE + bool "kexec file based system call" + select KEXEC_CORE + select BUILD_BIN2C + depends on PPC64 + depends on CRYPTO=y + depends on CRYPTO_SHA256=y + help + This is a new version of the kexec system call. This call is + file based and takes in file descriptors as system call arguments + for kernel and initramfs as opposed to a list of segments as is the + case for the older kexec call. 
+ config RELOCATABLE bool "Build a relocatable kernel" depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 2fc5d4db503c..4b369d83fe9c 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -386,3 +386,4 @@ SYSCALL(mlock2) SYSCALL(copy_file_range) COMPAT_SYS_SPU(preadv2) COMPAT_SYS_SPU(pwritev2) +SYSCALL(kexec_file_load) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index e8cdfec8d512..eb1acee91a20 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include -#define NR_syscalls 382 +#define NR_syscalls 383 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index e9f5f41aa55a..2f26335a3c42 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -392,5 +392,6 @@ #define __NR_copy_file_range 379 #define __NR_preadv2 380 #define __NR_pwritev2 381 +#define __NR_kexec_file_load 382 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ -- cgit From 500c7ab1a9db4aaf318eba711899804f8ca34eaf Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 29 Nov 2016 23:45:54 +1100 Subject: powerpc: Enable CONFIG_KEXEC_FILE in powerpc server defconfigs. Enable CONFIG_KEXEC_FILE in powernv_defconfig, ppc64_defconfig and pseries_defconfig. It depends on CONFIG_CRYPTO_SHA256=y, so add that as well. Signed-off-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman --- arch/powerpc/configs/powernv_defconfig | 2 ++ arch/powerpc/configs/ppc64_defconfig | 2 ++ arch/powerpc/configs/pseries_defconfig | 2 ++ 3 files changed, 6 insertions(+) diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 4bdc966687cc..e4d53fe5976a 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -49,6 +49,7 @@ CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y CONFIG_IRQ_ALL_CPUS=y CONFIG_NUMA=y CONFIG_MEMORY_HOTPLUG=y @@ -299,6 +300,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPT_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index a11a68c7236e..0396126ba6a8 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -46,6 +46,7 @@ CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y CONFIG_CRASH_DUMP=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTREMOVE=y @@ -335,6 +336,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPT_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index e8cc508d53c6..5a06bdde1674 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -52,6 +52,7 @@ CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y @@ -298,6 +299,7 @@ CONFIG_CRYPTO_HMAC=y CONFIG_CRYPT_CRC32C_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m 
CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m -- cgit From 53ce299615e587e900548eb6b384c3081b71bbfa Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 8 Nov 2016 17:08:06 +1100 Subject: powerpc/pseries: add definitions for new H_SIGNAL_SYS_RESET hcall This has not made its way to a PAPR release yet, but we have an hcall number assigned. H_SIGNAL_SYS_RESET = 0x380 Syntax: hcall(uint64 H_SIGNAL_SYS_RESET, int64 target); Generate a system reset NMI on the threads indicated by target. Values for target: -1 = target all online threads including the caller -2 = target all online threads except for the caller All other negative values: reserved Positive values: The thread to be targeted, obtained from the value of the "ibm,ppc-interrupt-server#s" property of the CPU in the OF device tree. Semantics: - Invalid target: return H_Parameter. - Otherwise: Generate a system reset NMI on target thread(s), return H_Success. This will be used by crash/debug code to get stuck CPUs into a known state. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 8 +++++++- arch/powerpc/include/asm/plpar_wrappers.h | 5 +++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 8d5f8352afd7..77ff1ba99d1f 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -276,7 +276,8 @@ #define H_GET_MPP_X 0x314 #define H_SET_MODE 0x31C #define H_CLEAR_HPT 0x358 -#define MAX_HCALL_OPCODE H_CLEAR_HPT +#define H_SIGNAL_SYS_RESET 0x380 +#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 @@ -307,6 +308,11 @@ #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 #define H_SET_MODE_RESOURCE_LE 4 +/* Values for argument to H_SIGNAL_SYS_RESET */ +#define H_SIGNAL_SYS_RESET_ALL -1 +#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 +/* >= 0 values are CPU number */ + #ifndef __ASSEMBLY__ /** diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 034a588b122c..0bcc75e295e3 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -308,4 +308,9 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); } +static inline long plapr_signal_sys_reset(long cpu) +{ + return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); +} + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ -- cgit From d03d1d65b5570ab6fba462a09c2b3274dca80ba9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 18 Nov 2016 23:15:41 +1100 Subject: powerpc/prom: Define structs for client architecture vectors The "client architecture vectors" are a series of structures we pass to firmware to define various things, such as what processors we support and many other options. Each structure is entirely different so we have to define a different struct for each one, but that's OK. 
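To make the struct-per-vector idea above concrete, here is a minimal sketch. The names and fields are invented for illustration and are not the kernel's actual definitions (those follow in the diff below); the point is that a __packed struct with endian-annotated members produces the byte stream firmware expects while letting the code assign fields by name.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical miniature option vector, for illustration only. */
struct example_option_vector {
	u8	byte1;		/* ignore/halt behaviour for unsupported options */
	__be32	max_cpus;	/* firmware reads big-endian words */
	__be16	papr_level;
} __packed;

static struct example_option_vector example_vec = {
	.byte1      = 0,
	.max_cpus   = cpu_to_be32(2048),	/* previously four hand-split bytes */
	.papr_level = cpu_to_be16(0),
};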
Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 53 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 05d2556ebb9f..e41c0b6637ab 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -740,6 +740,59 @@ unsigned char ibm_architecture_vec[] = { OV6_LINUX, }; +struct option_vector1 { + u8 byte1; + u8 arch_versions; +} __packed; + +struct option_vector2 { + u8 byte1; + __be16 reserved; + __be32 real_base; + __be32 real_size; + __be32 virt_base; + __be32 virt_size; + __be32 load_base; + __be32 min_rma; + __be32 min_load; + u8 min_rma_percent; + u8 max_pft_size; +} __packed; + +struct option_vector3 { + u8 byte1; + u8 byte2; +} __packed; + +struct option_vector4 { + u8 byte1; + u8 min_vp_cap; +} __packed; + +struct option_vector5 { + u8 byte1; + u8 byte2; + u8 byte3; + u8 cmo; + u8 associativity; + u8 bin_opts; + u8 micro_checkpoint; + u8 reserved0; + __be32 max_cpus; + __be16 papr_level; + __be16 reserved1; + u8 platform_facilities; + u8 reserved2; + __be16 reserved3; + u8 subprocessors; +} __packed; + +struct option_vector6 { + u8 reserved; + u8 secondary_pteg; + u8 os_name; +} __packed; + /* Old method - ELF header with PT_NOTE sections only works on BE */ #ifdef __BIG_ENDIAN__ static struct fake_elf { -- cgit From 76ffb5785047b3924da20969eb3f658b363c20f0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 18 Nov 2016 23:15:42 +1100 Subject: powerpc/prom: Switch to using structs for ibm_architecture_vec Now that we've defined structures to describe each of the client architecture vectors, we can use those to construct the value we pass to firmware. This avoids the tricks we previously played with the W() macro, allows us to properly endian annotate fields, and should help to avoid bugs introduced by failing to have the correct number of zero pad bytes between fields. It also means we can avoid hard coding IBM_ARCH_VEC_NRCORES_OFFSET in order to update the max_cpus value and instead just set it. Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/prom.h | 6 - arch/powerpc/kernel/prom_init.c | 274 ++++++++++++++++++++++------------------ 2 files changed, 154 insertions(+), 126 deletions(-) diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 7f436ba1b56f..5e57705b4759 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -159,11 +159,5 @@ struct of_drconf_cell { /* Option Vector 6: IBM PAPR hints */ #define OV6_LINUX 0x02 /* Linux is our OS */ -/* - * The architecture vector has an array of PVR mask/value pairs, - * followed by # option vectors - 1, followed by the option vectors. - */ -extern unsigned char ibm_architecture_vec[]; - #endif /* __KERNEL__ */ #endif /* _POWERPC_PROM_H */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e41c0b6637ab..ec47a939cbdd 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -635,13 +635,7 @@ static void __init early_cmdline_parse(void) * * See prom.h for the definition of the bits specified in the * architecture vector. - * - * Because the description vector contains a mix of byte and word - * values, we declare it as an unsigned char array, and use this - * macro to put word values in. 
*/ -#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 8) & 0xff, (x) & 0xff /* Firmware expects the value to be n - 1, where n is the # of vectors */ #define NUM_VECTORS(n) ((n) - 1) @@ -652,94 +646,6 @@ static void __init early_cmdline_parse(void) */ #define VECTOR_LENGTH(n) (1 + (n) - 2) -unsigned char ibm_architecture_vec[] = { - W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ - W(0xffff0000), W(0x003e0000), /* POWER6 */ - W(0xffff0000), W(0x003f0000), /* POWER7 */ - W(0xffff0000), W(0x004b0000), /* POWER8E */ - W(0xffff0000), W(0x004c0000), /* POWER8NVL */ - W(0xffff0000), W(0x004d0000), /* POWER8 */ - W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ - W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ - W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ - W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ - NUM_VECTORS(6), /* 6 option vectors */ - - /* option vector 1: processor architectures supported */ - VECTOR_LENGTH(2), /* length */ - 0, /* don't ignore, don't halt */ - OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | - OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, - - /* option vector 2: Open Firmware options supported */ - VECTOR_LENGTH(33), /* length */ - OV2_REAL_MODE, - 0, 0, - W(0xffffffff), /* real_base */ - W(0xffffffff), /* real_size */ - W(0xffffffff), /* virt_base */ - W(0xffffffff), /* virt_size */ - W(0xffffffff), /* load_base */ - W(256), /* 256MB min RMA */ - W(0xffffffff), /* full client load */ - 0, /* min RMA percentage of total RAM */ - 48, /* max log_2(hash table size) */ - - /* option vector 3: processor options supported */ - VECTOR_LENGTH(2), /* length */ - 0, /* don't ignore, don't halt */ - OV3_FP | OV3_VMX | OV3_DFP, - - /* option vector 4: IBM PAPR implementation */ - VECTOR_LENGTH(2), /* length */ - 0, /* don't halt */ - OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ - - /* option vector 5: PAPR/OF options */ - VECTOR_LENGTH(21), /* length */ - 0, /* don't ignore, don't halt */ - OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | - OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | -#ifdef CONFIG_PCI_MSI - /* PCIe/MSI support. Without MSI full PCIe is not supported */ - OV5_FEAT(OV5_MSI), -#else - 0, -#endif - 0, -#ifdef CONFIG_PPC_SMLPAR - OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), -#else - 0, -#endif - OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), - 0, - 0, - 0, - /* WARNING: The offset of the "number of cores" field below - * must match by the macro below. Update the definition if - * the structure layout changes. 
- */ -#define IBM_ARCH_VEC_NRCORES_OFFSET 133 - W(NR_CPUS), /* number of cores supported */ - 0, - 0, - 0, - 0, - OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | - OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */ - 0, /* Byte 18 */ - 0, /* Byte 19 */ - 0, /* Byte 20 */ - OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */ - - /* option vector 6: IBM PAPR hints */ - VECTOR_LENGTH(3), /* length */ - 0, - 0, - OV6_LINUX, -}; - struct option_vector1 { u8 byte1; u8 arch_versions; @@ -793,6 +699,154 @@ struct option_vector6 { u8 os_name; } __packed; +struct ibm_arch_vec { + struct { u32 mask, val; } pvrs[10]; + + u8 num_vectors; + + u8 vec1_len; + struct option_vector1 vec1; + + u8 vec2_len; + struct option_vector2 vec2; + + u8 vec3_len; + struct option_vector3 vec3; + + u8 vec4_len; + struct option_vector4 vec4; + + u8 vec5_len; + struct option_vector5 vec5; + + u8 vec6_len; + struct option_vector6 vec6; +} __packed; + +struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { + .pvrs = { + { + .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ + .val = cpu_to_be32(0x003a0000), + }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER6 */ + .val = cpu_to_be32(0x003e0000), + }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER7 */ + .val = cpu_to_be32(0x003f0000), + }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER8E */ + .val = cpu_to_be32(0x004b0000), + }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ + .val = cpu_to_be32(0x004c0000), + }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER8 */ + .val = cpu_to_be32(0x004d0000), + }, + { + .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ + .val = cpu_to_be32(0x0f000004), + }, + { + .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ + .val = cpu_to_be32(0x0f000003), + }, + { + .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ + .val = cpu_to_be32(0x0f000002), + }, + { + .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ + .val = cpu_to_be32(0x0f000001), + }, + }, + + .num_vectors = NUM_VECTORS(6), + + .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), + .vec1 = { + .byte1 = 0, + .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | + OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, + }, + + .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), + /* option vector 2: Open Firmware options supported */ + .vec2 = { + .byte1 = OV2_REAL_MODE, + .reserved = 0, + .real_base = cpu_to_be32(0xffffffff), + .real_size = cpu_to_be32(0xffffffff), + .virt_base = cpu_to_be32(0xffffffff), + .virt_size = cpu_to_be32(0xffffffff), + .load_base = cpu_to_be32(0xffffffff), + .min_rma = cpu_to_be32(256), /* 256MB min RMA */ + .min_load = cpu_to_be32(0xffffffff), /* full client load */ + .min_rma_percent = 0, /* min RMA percentage of total RAM */ + .max_pft_size = 48, /* max log_2(hash table size) */ + }, + + .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), + /* option vector 3: processor options supported */ + .vec3 = { + .byte1 = 0, /* don't ignore, don't halt */ + .byte2 = OV3_FP | OV3_VMX | OV3_DFP, + }, + + .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), + /* option vector 4: IBM PAPR implementation */ + .vec4 = { + .byte1 = 0, /* don't halt */ + .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ + }, + + .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), + /* option vector 5: PAPR/OF options */ + .vec5 = { + .byte1 = 0, /* don't ignore, don't halt */ + .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | + 
OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | +#ifdef CONFIG_PCI_MSI + /* PCIe/MSI support. Without MSI full PCIe is not supported */ + OV5_FEAT(OV5_MSI), +#else + 0, +#endif + .byte3 = 0, + .cmo = +#ifdef CONFIG_PPC_SMLPAR + OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), +#else + 0, +#endif + .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), + .bin_opts = 0, + .micro_checkpoint = 0, + .reserved0 = 0, + .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ + .papr_level = 0, + .reserved1 = 0, + .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), + .reserved2 = 0, + .reserved3 = 0, + .subprocessors = 1, + }, + + /* option vector 6: IBM PAPR hints */ + .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), + .vec6 = { + .reserved = 0, + .secondary_pteg = 0, + .os_name = OV6_LINUX, + }, +}; + /* Old method - ELF header with PT_NOTE sections only works on BE */ #ifdef __BIG_ENDIAN__ static struct fake_elf { @@ -926,7 +980,6 @@ static void __init prom_send_capabilities(void) ihandle root; prom_arg_t ret; u32 cores; - unsigned char *ptcores; root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { @@ -937,37 +990,18 @@ static void __init prom_send_capabilities(void) * divide NR_CPUS. */ - /* The core value may start at an odd address. If such a word - * access is made at a cache line boundary, this leads to an - * exception which may not be handled at this time. - * Forcing a per byte access to avoid exception. - */ - ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; - cores = 0; - cores |= ptcores[0] << 24; - cores |= ptcores[1] << 16; - cores |= ptcores[2] << 8; - cores |= ptcores[3]; - if (cores != NR_CPUS) { - prom_printf("WARNING ! " - "ibm_architecture_vec structure inconsistent: %lu!\n", - cores); - } else { - cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); - prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", - cores, NR_CPUS); - ptcores[0] = (cores >> 24) & 0xff; - ptcores[1] = (cores >> 16) & 0xff; - ptcores[2] = (cores >> 8) & 0xff; - ptcores[3] = cores & 0xff; - } + cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); + prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", + cores, NR_CPUS); + + ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); /* try calling the ibm,client-architecture-support method */ prom_printf("Calling ibm,client-architecture-support..."); if (call_prom_ret("call-method", 3, 2, &ret, ADDR("ibm,client-architecture-support"), root, - ADDR(ibm_architecture_vec)) == 0) { + ADDR(&ibm_architecture_vec)) == 0) { /* the call exists... */ if (ret) prom_printf("\nWARNING: ibm,client-architecture" -- cgit From 6dff5b67054e17c91bd630bcdda17cfca5aa4215 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Tue, 29 Nov 2016 10:47:32 -0800 Subject: powerpc/ps3: Fix system hang with GCC 5 builds GCC 5 generates different code for this bootwrapper null check that causes the PS3 to hang very early in its bootup. This check is of limited value, so just get rid of it. 
Cc: stable@vger.kernel.org Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman --- arch/powerpc/boot/ps3-head.S | 5 ----- arch/powerpc/boot/ps3.c | 8 +------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S index b6fcbaf5027b..3dc44b05fb97 100644 --- a/arch/powerpc/boot/ps3-head.S +++ b/arch/powerpc/boot/ps3-head.S @@ -57,11 +57,6 @@ __system_reset_overlay: bctr 1: - /* Save the value at addr zero for a null pointer write check later. */ - - li r4, 0 - lwz r3, 0(r4) - /* Primary delays then goes to _zimage_start in wrapper. */ or 31, 31, 31 /* db16cyc */ diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c index 4ec2d86d3c50..a05558a7e51a 100644 --- a/arch/powerpc/boot/ps3.c +++ b/arch/powerpc/boot/ps3.c @@ -119,13 +119,12 @@ void ps3_copy_vectors(void) flush_cache((void *)0x100, 512); } -void platform_init(unsigned long null_check) +void platform_init(void) { const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ void *chosen; unsigned long ft_addr; u64 rm_size; - unsigned long val; console_ops.write = ps3_console_write; platform_ops.exit = ps3_exit; @@ -153,11 +152,6 @@ void platform_init(unsigned long null_check) printf(" flat tree at 0x%lx\n\r", ft_addr); - val = *(unsigned long *)0; - - if (val != null_check) - printf("null check failed: %lx != %lx\n\r", val, null_check); - ((kernel_entry_t)0)(ft_addr, 0, NULL); ps3_exit(); -- cgit From dd5ac03e09554b7504f64990cfbcec884e3ec430 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 30 Nov 2016 19:41:02 +1100 Subject: powerpc/mm: Fix page table dump build on non-Book3S In the recent commit 1515ab932156 ("powerpc/mm: Dump hash table") we added code to dump the hash page table. Currently this can be selected to build on any platform. However it breaks the build if we're building for a non-Book3S platform, because none of the hash page table related defines and so on exist. So restrict it to building only on Book3S. Similarly in commit 8eb07b187000 ("powerpc/mm: Dump linux pagetables") we added code to dump the Linux page tables, which uses some constants which are only defined on Book3S - so guard those with an #ifdef. Fixes: 1515ab932156 ("powerpc/mm: Dump hash table") Fixes: 8eb07b187000 ("powerpc/mm: Dump linux pagetables") Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig.debug | 4 ++++ arch/powerpc/mm/Makefile | 4 ++-- arch/powerpc/mm/dump_linuxpagetables.c | 2 ++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 20cf770611ec..949258d412d0 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -366,4 +366,8 @@ config PPC_PTDUMP If you are unsure, say N.
+config PPC_HTDUMP + def_bool y + depends on PPC_PTDUMP && PPC_BOOK3S + endmenu diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index f4ffe1f68ce9..5784682d7b63 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -42,5 +42,5 @@ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o -obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o \ - dump_hashpagetable.o +obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o +obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index d242bc79ae64..49abaf4dc8e3 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -159,6 +159,7 @@ static const struct flag_info flag_array[] = { .set = "no cache", .clear = " ", }, { +#ifdef CONFIG_PPC_BOOK3S_64 .mask = H_PAGE_BUSY, .val = H_PAGE_BUSY, .set = "busy", @@ -183,6 +184,7 @@ static const struct flag_info flag_array[] = { .val = H_PAGE_F_SECOND, .set = "f_second", }, { +#endif .mask = _PAGE_SPECIAL, .val = _PAGE_SPECIAL, .set = "special", -- cgit From 2db029ef58ab7c47cecab5fe73fa6e3ae6663ecf Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Mon, 28 Nov 2016 11:50:45 -0500 Subject: powerpc/pseries: Use lmb_is_removable() to check removability We should be using lmb_is_removable() to validate that enough LMBs are available to remove when doing a remove by count. This will check that the LMB is owned by the system and it is considered removable. This patch also adds a pr_info() notification to report the LMB count to remove was not satisfied. What we do now is just check that there are enough LMBs owned by the system when validating there are enough LMBs to remove. This can lead to situations where there are enough LMBs owned by the system but not enough that are considered removable. This results in having to bail out of the remove operation instead of just failing the request that we should have known wouldn't succeed. 
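As a rough sketch of the distinction drawn above (simplified, and the exact checks in hotplug-memory.c may differ): an LMB only counts toward the request if it is assigned to the OS and every memory section backing it can actually be offlined.

#include <linux/memory_hotplug.h>
#include <linux/pfn.h>
#include <asm/prom.h>

/* Illustrative only; scns_per_block is assumed to hold the number of
 * memory sections per LMB, as in hotplug-memory.c. */
static bool example_lmb_is_removable(struct of_drconf_cell *lmb)
{
	unsigned long start_pfn = PFN_DOWN(lmb->base_addr);
	int i;

	/* Being owned by the system is necessary but not sufficient... */
	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	/* ...each backing section must also be offlinable. */
	for (i = 0; i < scns_per_block; i++) {
		unsigned long pfn = start_pfn + i * PAGES_PER_SECTION;

		if (!is_mem_section_removable(pfn, PAGES_PER_SECTION))
			return false;
	}

	return true;
}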
Signed-off-by: Nathan Fontenot Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/hotplug-memory.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 76ec104e88be..2617f9f356bd 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -472,12 +472,15 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove, /* Validate that there are enough LMBs to satisfy the request */ for (i = 0; i < num_lmbs; i++) { - if (lmbs[i].flags & DRCONF_MEM_ASSIGNED) + if (lmb_is_removable(&lmbs[i])) lmbs_available++; } - if (lmbs_available < lmbs_to_remove) + if (lmbs_available < lmbs_to_remove) { + pr_info("Not enough LMBs available (%d of %d) to satisfy request\n", + lmbs_available, lmbs_to_remove); return -EINVAL; + } for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) { rc = dlpar_remove_lmb(&lmbs[i]); -- cgit From 577ec789a79eb34f85a26c01f3851671b0d80e2e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Dec 2016 20:50:45 +1100 Subject: powerpc/cell: Drop select of MEMORY_HOTPLUG SPU_FS selects MEMORY_HOTPLUG, which is problematic because MEMORY_HOTPLUG is user selectable, meaning we can end up with a broken .config where MEMORY_HOTPLUG is enabled but its dependencies are not, leading to build breakages. The select of MEMORY_HOTPLUG for SPU_FS was added back in 2006, in commit 4da30d15b6d5 ("[POWERPC] spufs: fix memory hotplug dependency"). However we reworked the spufs code and removed the dependency on memory hotplug in 2007 in commit 78bde53e351b ("[POWERPC] spufs: remove need for struct page for SPEs"). So drop the select as it's no longer needed and causes problems. Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/cell/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index d9088f0b8fcc..441ad3bad3db 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -46,7 +46,6 @@ config SPU_FS default m depends on PPC_CELL select SPU_BASE - select MEMORY_HOTPLUG help The SPU file system is used to access Synergistic Processing Units on machines implementing the Broadband Processor -- cgit From 335967276bb6c9ca6a16e5aa0e61ae5e22ddff6f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Dec 2016 20:50:46 +1100 Subject: powerpc: Make selects of IBM_EMAC_* depend on IBM_EMAC We have a bunch of Kconfig symbols which select various IBM_EMAC_* symbols. These all cause warnings when IBM_EMAC is not selected. eg. warning: (PPC_CELL_NATIVE && BLUESTONE && CANYONLANDS && GLACIER && EIGER && 440EPX && 440GRX && 440GX && 460SX && 405EX) selects IBM_EMAC_RGMII which has unmet direct dependencies (NETDEVICES && ETHERNET && NET_VENDOR_IBM) So make them all depend on IBM_EMAC being enabled first. 
Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/40x/Kconfig | 12 ++++---- arch/powerpc/platforms/44x/Kconfig | 56 ++++++++++++++++++------------------- arch/powerpc/platforms/cell/Kconfig | 8 +++--- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index e3257f24a8a1..f8d1410aa5bb 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -102,18 +102,18 @@ config 405GP bool select IBM405_ERR77 select IBM405_ERR51 - select IBM_EMAC_ZMII + select IBM_EMAC_ZMII if IBM_EMAC config 405EX bool - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC config 405EZ bool - select IBM_EMAC_NO_FLOW_CTRL - select IBM_EMAC_MAL_CLR_ICINTSTAT - select IBM_EMAC_MAL_COMMON_ERR + select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC + select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC + select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC config XILINX_VIRTEX bool diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 48fc18041ff6..8d18669856f9 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -26,7 +26,7 @@ config BLUESTONE select PCI_MSI select PPC4xx_MSI select PPC4xx_PCI_EXPRESS - select IBM_EMAC_RGMII + select IBM_EMAC_RGMII if IBM_EMAC help This option enables support for the APM APM821xx Evaluation board. @@ -125,8 +125,8 @@ config CANYONLANDS select PPC4xx_PCI_EXPRESS select PCI_MSI select PPC4xx_MSI - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC help This option enables support for the AMCC PPC460EX evaluation board. @@ -138,8 +138,8 @@ config GLACIER select 460EX # Odd since it uses 460GT but the effects are the same select PCI select PPC4xx_PCI_EXPRESS - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC help This option enables support for the AMCC PPC460GT evaluation board. @@ -164,7 +164,7 @@ config EIGER select 460SX select PCI select PPC4xx_PCI_EXPRESS - select IBM_EMAC_RGMII + select IBM_EMAC_RGMII if IBM_EMAC help This option enables support for the AMCC PPC460SX evaluation board. 
@@ -213,7 +213,7 @@ config AKEBONO select NETDEVICES select ETHERNET select NET_VENDOR_IBM - select IBM_EMAC_EMAC4 + select IBM_EMAC_EMAC4 if IBM_EMAC select USB if USB_SUPPORT select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD @@ -290,54 +290,54 @@ config 440EP bool select PPC_FPU select IBM440EP_ERR42 - select IBM_EMAC_ZMII + select IBM_EMAC_ZMII if IBM_EMAC config 440EPX bool select PPC_FPU - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC select USB_EHCI_BIG_ENDIAN_MMIO select USB_EHCI_BIG_ENDIAN_DESC config 440GRX bool - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC config 440GP bool - select IBM_EMAC_ZMII + select IBM_EMAC_ZMII if IBM_EMAC config 440GX bool - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII #test only - select IBM_EMAC_TAH #test only + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC #test only + select IBM_EMAC_TAH if IBM_EMAC #test only config 440SP bool config 440SPe bool - select IBM_EMAC_EMAC4 + select IBM_EMAC_EMAC4 if IBM_EMAC config 460EX bool select PPC_FPU - select IBM_EMAC_EMAC4 - select IBM_EMAC_TAH + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_TAH if IBM_EMAC config 460SX bool select PPC_FPU - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII - select IBM_EMAC_TAH + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC + select IBM_EMAC_TAH if IBM_EMAC config 476FPE bool @@ -346,8 +346,8 @@ config 476FPE config APM821xx bool select PPC_FPU - select IBM_EMAC_EMAC4 - select IBM_EMAC_TAH + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_TAH if IBM_EMAC config 476FPE_ERR46 depends on 476FPE diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 441ad3bad3db..a4522f09d65e 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -17,10 +17,10 @@ config PPC_CELL_NATIVE select PPC_CELL_COMMON select MPIC select PPC_IO_WORKAROUNDS - select IBM_EMAC_EMAC4 - select IBM_EMAC_RGMII - select IBM_EMAC_ZMII #test only - select IBM_EMAC_TAH #test only + select IBM_EMAC_EMAC4 if IBM_EMAC + select IBM_EMAC_RGMII if IBM_EMAC + select IBM_EMAC_ZMII if IBM_EMAC #test only + select IBM_EMAC_TAH if IBM_EMAC #test only default n config PPC_IBM_CELL_BLADE -- cgit From 93c2ec0f7019a155769d1ecfc08a7de83b47e03d Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 30 Nov 2016 17:45:09 +1100 Subject: powerpc Don't print misleading facility name in facility unavailable exception The current facility_strings[] are correct when the trap address is 0xf80 (hypervisor facility unavailable). When the trap address is 0xf60 (facility unavailable) IC (Interruption Cause) a.k.a status in the code is undefined for values 0 and 1. Add a check to prevent printing the (misleading) facility name for IC 0 and 1 when we came in via 0xf60. In all cases, print the actual IC value, to avoid any confusion. This hasn't been seen on real hardware, on only qemu which was misreporting an exception. 
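The guard can be summarised with a small sketch (hypothetical table; the real facility_strings[] and the FSCR interruption-cause encoding live in traps.c and reg.h): only trust the IC as an index when the interrupt came in through the hypervisor vector or the value is at least 2, otherwise report just the raw number.

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative lookup only; the indices below are examples, not the full table. */
static const char *example_facility_name(bool hv, u8 status)
{
	static const char * const names[] = {
		[2] = "DSCR",		/* 0 and 1 are undefined for the 0xf60 vector */
		[3] = "PMU SPRs",
		[7] = "TAR",
	};

	if ((hv || status >= 2) &&
	    status < ARRAY_SIZE(names) && names[status])
		return names[status];

	return NULL;	/* caller prints the numeric IC instead */
}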
Signed-off-by: Balbir Singh [mpe: Fix indentation, combine printks(), massage change log] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/traps.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 5faaed2f2144..4239aaf74886 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1519,7 +1519,8 @@ void facility_unavailable_exception(struct pt_regs *regs) return; } - if ((status < ARRAY_SIZE(facility_strings)) && + if ((hv || status >= 2) && + (status < ARRAY_SIZE(facility_strings)) && facility_strings[status]) facility = facility_strings[status]; @@ -1527,9 +1528,8 @@ void facility_unavailable_exception(struct pt_regs *regs) if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - pr_err_ratelimited( - "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", - hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); + pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", + hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); out: if (user_mode(regs)) { -- cgit From f6853eb561fb9bfd56dfe0009f0ea27540937bd5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 15 Nov 2016 21:59:38 +1100 Subject: powerpc/64: Define ILLEGAL_POINTER_VALUE for 64-bit This is used in poison.h to offset poison values so that they don't point directly into user space. The value we choose sits roughly between user and kernel space, which means on their own the poison values don't point anywhere useful. If an attacker can cause an access at some offset from the poison value then we may still be in trouble, but by putting the poison values between user and kernel space we maximise the required size of that offset. Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 48923c349ccb..48001e754ff2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -580,6 +580,13 @@ config ARCH_SPARSEMEM_DEFAULT config SYS_SUPPORTS_HUGETLBFS bool +config ILLEGAL_POINTER_VALUE + hex + # This is roughly half way between the top of user space and the bottom + # of kernel space, which seems about as good as we can get. + default 0x5deadbeef0000000 if PPC64 + default 0 + source "mm/Kconfig" config ARCH_MEMORY_PROBE -- cgit From 88f54a3581eb9deaa3bd1aade40aef266d782385 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:51:59 +1100 Subject: powerpc/iommu: Pass mm_struct to init/cleanup helpers We are going to get rid of @current references in mmu_context_book3s64.c and cache mm_struct in the VFIO container. Since mm_context_t does not have reference counting, we will be using mm_struct which does have the reference counter. This changes mm_iommu_init/mm_iommu_cleanup to receive mm_struct rather than mm_context_t (which is embedded into mm). This should not cause any behavioral change.
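Returning to the ILLEGAL_POINTER_VALUE patch above: include/linux/poison.h consumes the value roughly as sketched below (paraphrased, not copied verbatim), so the familiar list poisons become pointers into the unmapped gap between user and kernel space on PPC64.

/* Paraphrased from include/linux/poison.h. */
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif

/* Non-NULL, but not dereferenceable: with the default above these become
 * 0x5deadbeef0000100 and 0x5deadbeef0000200 on PPC64. */
#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)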
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu_context.h | 4 ++-- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/mm/mmu_context_book3s64.c | 4 ++-- arch/powerpc/mm/mmu_context_iommu.c | 9 +++++---- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 5c451140660a..424844bc2a57 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -23,8 +23,8 @@ extern bool mm_iommu_preregistered(void); extern long mm_iommu_get(unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem); extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem); -extern void mm_iommu_init(mm_context_t *ctx); -extern void mm_iommu_cleanup(mm_context_t *ctx); +extern void mm_iommu_init(struct mm_struct *mm); +extern void mm_iommu_cleanup(struct mm_struct *mm); extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, unsigned long size); extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua, diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 270ee30abdcf..f516ac508ae3 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p) init_mm.context.pte_frag = NULL; #endif #ifdef CONFIG_SPAPR_TCE_IOMMU - mm_iommu_init(&init_mm.context); + mm_iommu_init(&init_mm); #endif irqstack_early_init(); exc_lvl_early_init(); diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index b114f8b93ec9..ad8273590975 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.pte_frag = NULL; #endif #ifdef CONFIG_SPAPR_TCE_IOMMU - mm_iommu_init(&mm->context); + mm_iommu_init(mm); #endif return 0; } @@ -160,7 +160,7 @@ static inline void destroy_pagetable_page(struct mm_struct *mm) void destroy_context(struct mm_struct *mm) { #ifdef CONFIG_SPAPR_TCE_IOMMU - mm_iommu_cleanup(&mm->context); + mm_iommu_cleanup(mm); #endif #ifdef CONFIG_PPC_ICSWX diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index e0f1c33601dd..ad2e575fd418 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -373,16 +373,17 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem) } EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec); -void mm_iommu_init(mm_context_t *ctx) +void mm_iommu_init(struct mm_struct *mm) { - INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list); + INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list); } -void mm_iommu_cleanup(mm_context_t *ctx) +void mm_iommu_cleanup(struct mm_struct *mm) { struct mm_iommu_table_group_mem_t *mem, *tmp; - list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) { + list_for_each_entry_safe(mem, tmp, &mm->context.iommu_group_mem_list, + next) { list_del_rcu(&mem->next); mm_iommu_do_free(mem); } -- cgit From d7baee6901b34c4895eb78efdbf13a49079d7404 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:00 +1100 Subject: powerpc/iommu: Stop using @current in mm_iommu_xxx This changes mm_iommu_xxx helpers to take mm_struct as a parameter instead of getting it from @current which in some situations may not have a valid reference to 
mm. This changes helpers to receive @mm and moves all references to @current to the caller, including checks for !current and !current->mm; checks in mm_iommu_preregistered() are removed as there is no caller yet. This moves the mm_iommu_adjust_locked_vm() call to the caller as it receives mm_iommu_table_group_mem_t but it needs mm. This should cause no behavioral change. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Acked-by: Alex Williamson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu_context.h | 16 ++++++------ arch/powerpc/mm/mmu_context_iommu.c | 46 +++++++++++++--------------------- drivers/vfio/vfio_iommu_spapr_tce.c | 14 ++++++++--- 3 files changed, 36 insertions(+), 40 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 424844bc2a57..b9e3f0aca261 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm); struct mm_iommu_table_group_mem_t; extern int isolate_lru_page(struct page *page); /* from internal.h */ -extern bool mm_iommu_preregistered(void); -extern long mm_iommu_get(unsigned long ua, unsigned long entries, +extern bool mm_iommu_preregistered(struct mm_struct *mm); +extern long mm_iommu_get(struct mm_struct *mm, + unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem); -extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem); +extern long mm_iommu_put(struct mm_struct *mm, + struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_init(struct mm_struct *mm); extern void mm_iommu_cleanup(struct mm_struct *mm); -extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, - unsigned long size); -extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua, - unsigned long entries); +extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, + unsigned long ua, unsigned long size); +extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, + unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned long *hpa); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index ad2e575fd418..4c6db09e77ad 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, } pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n", - current->pid, + current ? current->pid : 0, incr ? 
'+' : '-', npages << PAGE_SHIFT, mm->locked_vm << PAGE_SHIFT, @@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, return ret; } -bool mm_iommu_preregistered(void) +bool mm_iommu_preregistered(struct mm_struct *mm) { - if (!current || !current->mm) - return false; - - return !list_empty(¤t->mm->context.iommu_group_mem_list); + return !list_empty(&mm->context.iommu_group_mem_list); } EXPORT_SYMBOL_GPL(mm_iommu_preregistered); @@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page) return 0; } -long mm_iommu_get(unsigned long ua, unsigned long entries, +long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem) { struct mm_iommu_table_group_mem_t *mem; long i, j, ret = 0, locked_entries = 0; struct page *page = NULL; - if (!current || !current->mm) - return -ESRCH; /* process exited */ - mutex_lock(&mem_list_mutex); - list_for_each_entry_rcu(mem, ¤t->mm->context.iommu_group_mem_list, + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua == ua) && (mem->entries == entries)) { ++mem->used; @@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries, } - ret = mm_iommu_adjust_locked_vm(current->mm, entries, true); + ret = mm_iommu_adjust_locked_vm(mm, entries, true); if (ret) goto unlock_exit; @@ -215,11 +209,11 @@ populate: mem->entries = entries; *pmem = mem; - list_add_rcu(&mem->next, ¤t->mm->context.iommu_group_mem_list); + list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); unlock_exit: if (locked_entries && ret) - mm_iommu_adjust_locked_vm(current->mm, locked_entries, false); + mm_iommu_adjust_locked_vm(mm, locked_entries, false); mutex_unlock(&mem_list_mutex); @@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head) static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) { list_del_rcu(&mem->next); - mm_iommu_adjust_locked_vm(current->mm, mem->entries, false); call_rcu(&mem->rcu, mm_iommu_free); } -long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem) +long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) { long ret = 0; - if (!current || !current->mm) - return -ESRCH; /* process exited */ - mutex_lock(&mem_list_mutex); if (mem->used == 0) { @@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem) /* @mapped became 0 so now mappings are disabled, release the region */ mm_iommu_release(mem); + mm_iommu_adjust_locked_vm(mm, mem->entries, false); + unlock_exit: mutex_unlock(&mem_list_mutex); @@ -304,14 +296,12 @@ unlock_exit: } EXPORT_SYMBOL_GPL(mm_iommu_put); -struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, - unsigned long size) +struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, + unsigned long ua, unsigned long size) { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; - list_for_each_entry_rcu(mem, - ¤t->mm->context.iommu_group_mem_list, - next) { + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua <= ua) && (ua + size <= mem->ua + (mem->entries << PAGE_SHIFT))) { @@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua, } EXPORT_SYMBOL_GPL(mm_iommu_lookup); -struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua, - unsigned long entries) +struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, + unsigned long ua, unsigned long entries) { struct mm_iommu_table_group_mem_t *mem, *ret = NULL; - 
list_for_each_entry_rcu(mem, - &current->mm->context.iommu_group_mem_list, - next) { + list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { if ((mem->ua == ua) && (mem->entries == entries)) { ret = mem; break; diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 80378ddadc5c..d0c38b201267 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -107,14 +107,17 @@ static long tce_iommu_unregister_pages(struct tce_container *container, { struct mm_iommu_table_group_mem_t *mem; + if (!current || !current->mm) + return -ESRCH; /* process exited */ + if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) return -EINVAL; - mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT); + mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT); if (!mem) return -ENOENT; - return mm_iommu_put(mem); + return mm_iommu_put(current->mm, mem); } static long tce_iommu_register_pages(struct tce_container *container, @@ -124,11 +127,14 @@ static long tce_iommu_register_pages(struct tce_container *container, struct mm_iommu_table_group_mem_t *mem = NULL; unsigned long entries = size >> PAGE_SHIFT; + if (!current || !current->mm) + return -ESRCH; /* process exited */ + if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || ((vaddr + size) < vaddr)) return -EINVAL; - ret = mm_iommu_get(vaddr, entries, &mem); + ret = mm_iommu_get(current->mm, vaddr, entries, &mem); if (ret) return ret; @@ -375,7 +381,7 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(tce, size); + mem = mm_iommu_lookup(current->mm, tce, size); if (!mem) return -EINVAL; -- cgit From 39701e56f5f16ea0cf8fc9e8472e645f8de91d23 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:01 +1100 Subject: vfio/spapr: Postpone allocation of userspace version of TCE table The iommu_table struct manages a hardware TCE table and a vmalloc'd table with corresponding userspace addresses. Both are allocated when the default DMA window is created and this happens when the very first group is attached to a container. As we are going to allow userspace to configure a container in one memory context and pass the container fd to another, we have to postpone such allocations until a container fd is passed to the destination user process so we account the locked memory limit against the actual container user's constraints. This postpones the it_userspace array allocation until the first time it is used for mapping. The unmapping patch already checks if the array is allocated.
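The scenario motivating this and the following patches, where one process sets up the container and a different process (whose mm should be charged) ends up using it, is ordinary SCM_RIGHTS descriptor passing. A hedged userspace sketch, with error handling trimmed and nothing VFIO-specific beyond the fd happening to be a container fd:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Hand an open VFIO container fd to another process over a unix socket. */
static int send_container_fd(int sock, int container_fd)
{
	char dummy = 'x';
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &container_fd, sizeof(int));

	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}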
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Acked-by: Alex Williamson Signed-off-by: Michael Ellerman --- drivers/vfio/vfio_iommu_spapr_tce.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index d0c38b201267..4efd2b20c35c 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -515,6 +515,12 @@ static long tce_iommu_build_v2(struct tce_container *container, unsigned long hpa; enum dma_data_direction dirtmp; + if (!tbl->it_userspace) { + ret = tce_iommu_userspace_view_alloc(tbl); + if (ret) + return ret; + } + for (i = 0; i < pages; ++i) { struct mm_iommu_table_group_mem_t *mem = NULL; unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, @@ -588,15 +594,6 @@ static long tce_iommu_create_table(struct tce_container *container, WARN_ON(!ret && !(*ptbl)->it_ops->free); WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); - if (!ret && container->v2) { - ret = tce_iommu_userspace_view_alloc(*ptbl); - if (ret) - (*ptbl)->it_ops->free(*ptbl); - } - - if (ret) - decrement_locked_vm(table_size >> PAGE_SHIFT); - return ret; } @@ -1068,10 +1065,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, if (!tbl || !tbl->it_map) continue; - rc = tce_iommu_userspace_view_alloc(tbl); - if (!rc) - rc = iommu_take_ownership(tbl); - + rc = iommu_take_ownership(tbl); if (rc) { for (j = 0; j < i; ++j) iommu_release_ownership( -- cgit From 6f01cc692a16405235d5c34056455b182682123c Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:02 +1100 Subject: vfio/spapr: Add a helper to create default DMA window There is already a helper to create a DMA window which does allocate a table and programs it to the IOMMU group. However tce_iommu_take_ownership_ddw() did not use it and did these 2 calls itself to simplify error path. Since we are going to delay the default window creation till the default window is accessed/removed or new window is added, we need a helper to create a default window from all these cases. This adds tce_iommu_create_default_window(). Since it relies on a VFIO container to have at least one IOMMU group (for future use), this changes tce_iommu_attach_group() to add a group to the container first and then call the new helper. 
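For reference, the default DMA window the new helper creates is the 32-bit window userspace sees via VFIO_IOMMU_SPAPR_TCE_GET_INFO. A minimal query sketch, assuming a container fd that already has a group attached and the SPAPR TCE IOMMU set:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* container_fd: an open /dev/vfio/vfio fd with VFIO_SET_IOMMU already done. */
static void show_default_window(int container_fd)
{
	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };

	if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) == 0)
		printf("default window: start=0x%x size=0x%x\n",
		       info.dma32_window_start, info.dma32_window_size);
}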
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Acked-by: Alex Williamson Signed-off-by: Michael Ellerman --- drivers/vfio/vfio_iommu_spapr_tce.c | 87 ++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 4efd2b20c35c..a67bbfdd86d5 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -710,6 +710,29 @@ static long tce_iommu_remove_window(struct tce_container *container, return 0; } +static long tce_iommu_create_default_window(struct tce_container *container) +{ + long ret; + __u64 start_addr = 0; + struct tce_iommu_group *tcegrp; + struct iommu_table_group *table_group; + + if (!tce_groups_attached(container)) + return -ENODEV; + + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + if (!table_group) + return -ENODEV; + + ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, + table_group->tce32_size, 1, &start_addr); + WARN_ON_ONCE(!ret && start_addr); + + return ret; +} + static long tce_iommu_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -1100,9 +1123,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, static long tce_iommu_take_ownership_ddw(struct tce_container *container, struct iommu_table_group *table_group) { - long i, ret = 0; - struct iommu_table *tbl = NULL; - if (!table_group->ops->create_table || !table_group->ops->set_window || !table_group->ops->release_ownership) { WARN_ON_ONCE(1); @@ -1111,47 +1131,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, table_group->ops->take_ownership(table_group); - /* - * If it the first group attached, check if there is - * a default DMA window and create one if none as - * the userspace expects it to exist. 
- */ - if (!tce_groups_attached(container) && !container->tables[0]) { - ret = tce_iommu_create_table(container, - table_group, - 0, /* window number */ - IOMMU_PAGE_SHIFT_4K, - table_group->tce32_size, - 1, /* default levels */ - &tbl); - if (ret) - goto release_exit; - else - container->tables[0] = tbl; - } - - /* Set all windows to the new group */ - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - tbl = container->tables[i]; - - if (!tbl) - continue; - - /* Set the default window to a new group */ - ret = table_group->ops->set_window(table_group, i, tbl); - if (ret) - goto release_exit; - } - return 0; - -release_exit: - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) - table_group->ops->unset_window(table_group, i); - - table_group->ops->release_ownership(table_group); - - return ret; } static int tce_iommu_attach_group(void *iommu_data, @@ -1161,6 +1141,7 @@ static int tce_iommu_attach_group(void *iommu_data, struct tce_container *container = iommu_data; struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp = NULL; + bool create_default_window = false; mutex_lock(&container->lock); @@ -1203,14 +1184,30 @@ static int tce_iommu_attach_group(void *iommu_data, } if (!table_group->ops || !table_group->ops->take_ownership || - !table_group->ops->release_ownership) + !table_group->ops->release_ownership) { ret = tce_iommu_take_ownership(container, table_group); - else + } else { ret = tce_iommu_take_ownership_ddw(container, table_group); + if (!tce_groups_attached(container) && !container->tables[0]) + create_default_window = true; + } if (!ret) { tcegrp->grp = iommu_group; list_add(&tcegrp->next, &container->group_list); + /* + * If it the first group attached, check if there is + * a default DMA window and create one if none as + * the userspace expects it to exist. + */ + if (create_default_window) { + ret = tce_iommu_create_default_window(container); + if (ret) { + list_del(&tcegrp->next); + tce_iommu_release_ownership_ddw(container, + table_group); + } + } } unlock_exit: -- cgit From d9c728949ddc9de5734bf3b12ea906ca8a77f2a0 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:03 +1100 Subject: vfio/spapr: Postpone default window creation We are going to allow the userspace to configure container in one memory context and pass container fd to another so we are postponing memory allocations accounted against the locked memory limit. One of previous patches took care of it_userspace. At the moment we create the default DMA window when the first group is attached to a container; this is done for the userspace which is not DDW-aware but familiar with the SPAPR TCE IOMMU v2 in the part of memory pre-registration - such client expects the default DMA window to exist. This postpones the default DMA window allocation till one of the folliwing happens: 1. first map/unmap request arrives; 2. new window is requested; This adds noop for the case when the userspace requested removal of the default window which has not been created yet. 
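The resulting control flow condenses to the sketch below (simplified: in the actual patch the pending check lives inside tce_iommu_create_default_window() itself). Attaching the first group only records that a default window is owed, and the window is created lazily on the first request that needs it.

/* Simplified illustration of the def_window_pending flag added below;
 * assumes the file-local tce_container and helper from vfio_iommu_spapr_tce.c. */
static long example_first_dma_request(struct tce_container *container)
{
	long ret;

	if (container->def_window_pending) {
		/* First map/unmap/create-window request: create the window now. */
		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;
		container->def_window_pending = false;
	}

	/* ... normal request handling continues here ... */
	return 0;
}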
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Acked-by: Alex Williamson Signed-off-by: Michael Ellerman --- drivers/vfio/vfio_iommu_spapr_tce.c | 40 +++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index a67bbfdd86d5..88622be0d6b5 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -97,6 +97,7 @@ struct tce_container { struct mutex lock; bool enabled; bool v2; + bool def_window_pending; unsigned long locked_pages; struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; struct list_head group_list; @@ -717,6 +718,9 @@ static long tce_iommu_create_default_window(struct tce_container *container) struct tce_iommu_group *tcegrp; struct iommu_table_group *table_group; + if (!container->def_window_pending) + return 0; + if (!tce_groups_attached(container)) return -ENODEV; @@ -730,6 +734,9 @@ static long tce_iommu_create_default_window(struct tce_container *container) table_group->tce32_size, 1, &start_addr); WARN_ON_ONCE(!ret && start_addr); + if (!ret) + container->def_window_pending = false; + return ret; } @@ -823,6 +830,10 @@ static long tce_iommu_ioctl(void *iommu_data, VFIO_DMA_MAP_FLAG_WRITE)) return -EINVAL; + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + num = tce_iommu_find_table(container, param.iova, &tbl); if (num < 0) return -ENXIO; @@ -886,6 +897,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (param.flags) return -EINVAL; + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + num = tce_iommu_find_table(container, param.iova, &tbl); if (num < 0) return -ENXIO; @@ -1012,6 +1027,10 @@ static long tce_iommu_ioctl(void *iommu_data, mutex_lock(&container->lock); + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + ret = tce_iommu_create_window(container, create.page_shift, create.window_size, create.levels, &create.start_addr); @@ -1044,6 +1063,11 @@ static long tce_iommu_ioctl(void *iommu_data, if (remove.flags) return -EINVAL; + if (container->def_window_pending && !remove.start_addr) { + container->def_window_pending = false; + return 0; + } + mutex_lock(&container->lock); ret = tce_iommu_remove_window(container, remove.start_addr); @@ -1141,7 +1165,6 @@ static int tce_iommu_attach_group(void *iommu_data, struct tce_container *container = iommu_data; struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp = NULL; - bool create_default_window = false; mutex_lock(&container->lock); @@ -1189,25 +1212,12 @@ static int tce_iommu_attach_group(void *iommu_data, } else { ret = tce_iommu_take_ownership_ddw(container, table_group); if (!tce_groups_attached(container) && !container->tables[0]) - create_default_window = true; + container->def_window_pending = true; } if (!ret) { tcegrp->grp = iommu_group; list_add(&tcegrp->next, &container->group_list); - /* - * If it the first group attached, check if there is - * a default DMA window and create one if none as - * the userspace expects it to exist. 
- */ - if (create_default_window) { - ret = tce_iommu_create_default_window(container); - if (ret) { - list_del(&tcegrp->next); - tce_iommu_release_ownership_ddw(container, - table_group); - } - } } unlock_exit: -- cgit From bc82d122ae4a0e9f971f13403995898fcfa0c09e Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:04 +1100 Subject: vfio/spapr: Reference mm in tce_container In some situations the userspace memory context may live longer than the userspace process itself so if we need to do proper memory context cleanup, we better have tce_container take a reference to mm_struct and use it later when the process is gone (@current or @current->mm is NULL). This references mm and stores the pointer in the container; this is done in a new helper - tce_iommu_mm_set() - when one of the following happens: - a container is enabled (IOMMU v1); - a first attempt to pre-register memory is made (IOMMU v2); - a DMA window is created (IOMMU v2). The @mm stays referenced till the container is destroyed. This replaces current->mm with container->mm everywhere except debug prints. This adds a check that current->mm is the same as the one stored in the container to prevent userspace from making changes to a memory context of other processes. DMA map/unmap ioctls() do not check for @mm as they already check for @enabled which is set after tce_iommu_mm_set() is called. This does not reference a task as multiple threads within the same mm are allowed to ioctl() to vfio and supposedly they will have same limits and capabilities and if they do not, we'll just fail with no harm made. Signed-off-by: Alexey Kardashevskiy Acked-by: Alex Williamson Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- drivers/vfio/vfio_iommu_spapr_tce.c | 160 ++++++++++++++++++++++-------------- 1 file changed, 100 insertions(+), 60 deletions(-) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 88622be0d6b5..4c03c8525c26 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -31,49 +31,49 @@ static void tce_iommu_detach_group(void *iommu_data, struct iommu_group *iommu_group); -static long try_increment_locked_vm(long npages) +static long try_increment_locked_vm(struct mm_struct *mm, long npages) { long ret = 0, locked, lock_limit; - if (!current || !current->mm) - return -ESRCH; /* process exited */ + if (WARN_ON_ONCE(!mm)) + return -EPERM; if (!npages) return 0; - down_write(¤t->mm->mmap_sem); - locked = current->mm->locked_vm + npages; + down_write(&mm->mmap_sem); + locked = mm->locked_vm + npages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - current->mm->locked_vm += npages; + mm->locked_vm += npages; pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), ret ? 
" - exceeded" : ""); - up_write(¤t->mm->mmap_sem); + up_write(&mm->mmap_sem); return ret; } -static void decrement_locked_vm(long npages) +static void decrement_locked_vm(struct mm_struct *mm, long npages) { - if (!current || !current->mm || !npages) - return; /* process exited */ + if (!mm || !npages) + return; - down_write(¤t->mm->mmap_sem); - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) - npages = current->mm->locked_vm; - current->mm->locked_vm -= npages; + down_write(&mm->mmap_sem); + if (WARN_ON_ONCE(npages > mm->locked_vm)) + npages = mm->locked_vm; + mm->locked_vm -= npages; pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK)); - up_write(¤t->mm->mmap_sem); + up_write(&mm->mmap_sem); } /* @@ -99,26 +99,38 @@ struct tce_container { bool v2; bool def_window_pending; unsigned long locked_pages; + struct mm_struct *mm; struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; struct list_head group_list; }; +static long tce_iommu_mm_set(struct tce_container *container) +{ + if (container->mm) { + if (container->mm == current->mm) + return 0; + return -EPERM; + } + BUG_ON(!current->mm); + container->mm = current->mm; + atomic_inc(&container->mm->mm_count); + + return 0; +} + static long tce_iommu_unregister_pages(struct tce_container *container, __u64 vaddr, __u64 size) { struct mm_iommu_table_group_mem_t *mem; - if (!current || !current->mm) - return -ESRCH; /* process exited */ - if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) return -EINVAL; - mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT); + mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); if (!mem) return -ENOENT; - return mm_iommu_put(current->mm, mem); + return mm_iommu_put(container->mm, mem); } static long tce_iommu_register_pages(struct tce_container *container, @@ -128,14 +140,11 @@ static long tce_iommu_register_pages(struct tce_container *container, struct mm_iommu_table_group_mem_t *mem = NULL; unsigned long entries = size >> PAGE_SHIFT; - if (!current || !current->mm) - return -ESRCH; /* process exited */ - if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || ((vaddr + size) < vaddr)) return -EINVAL; - ret = mm_iommu_get(current->mm, vaddr, entries, &mem); + ret = mm_iommu_get(container->mm, vaddr, entries, &mem); if (ret) return ret; @@ -144,7 +153,8 @@ static long tce_iommu_register_pages(struct tce_container *container, return 0; } -static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl, + struct mm_struct *mm) { unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); @@ -153,13 +163,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) BUG_ON(tbl->it_userspace); - ret = try_increment_locked_vm(cb >> PAGE_SHIFT); + ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT); if (ret) return ret; uas = vzalloc(cb); if (!uas) { - decrement_locked_vm(cb >> PAGE_SHIFT); + decrement_locked_vm(mm, cb >> PAGE_SHIFT); return -ENOMEM; } tbl->it_userspace = uas; @@ -167,7 +177,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) return 0; } -static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +static void tce_iommu_userspace_view_free(struct iommu_table *tbl, + struct mm_struct *mm) { unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); @@ -177,7 +188,7 @@ static void 
tce_iommu_userspace_view_free(struct iommu_table *tbl) vfree(tbl->it_userspace); tbl->it_userspace = NULL; - decrement_locked_vm(cb >> PAGE_SHIFT); + decrement_locked_vm(mm, cb >> PAGE_SHIFT); } static bool tce_page_is_contained(struct page *page, unsigned page_shift) @@ -237,9 +248,6 @@ static int tce_iommu_enable(struct tce_container *container) struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp; - if (!current->mm) - return -ESRCH; /* process exited */ - if (container->enabled) return -EBUSY; @@ -284,8 +292,12 @@ static int tce_iommu_enable(struct tce_container *container) if (!table_group->tce32_size) return -EPERM; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + locked = table_group->tce32_size >> PAGE_SHIFT; - ret = try_increment_locked_vm(locked); + ret = try_increment_locked_vm(container->mm, locked); if (ret) return ret; @@ -303,10 +315,8 @@ static void tce_iommu_disable(struct tce_container *container) container->enabled = false; - if (!current->mm) - return; - - decrement_locked_vm(container->locked_pages); + BUG_ON(!container->mm); + decrement_locked_vm(container->mm, container->locked_pages); } static void *tce_iommu_open(unsigned long arg) @@ -333,7 +343,8 @@ static void *tce_iommu_open(unsigned long arg) static int tce_iommu_clear(struct tce_container *container, struct iommu_table *tbl, unsigned long entry, unsigned long pages); -static void tce_iommu_free_table(struct iommu_table *tbl); +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl); static void tce_iommu_release(void *iommu_data) { @@ -358,10 +369,12 @@ static void tce_iommu_release(void *iommu_data) continue; tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); } tce_iommu_disable(container); + if (container->mm) + mmdrop(container->mm); mutex_destroy(&container->lock); kfree(container); @@ -376,13 +389,14 @@ static void tce_iommu_unuse_page(struct tce_container *container, put_page(page); } -static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, +static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, + unsigned long tce, unsigned long size, unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) { long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(current->mm, tce, size); + mem = mm_iommu_lookup(container->mm, tce, size); if (!mem) return -EINVAL; @@ -395,18 +409,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, return 0; } -static void tce_iommu_unuse_page_v2(struct iommu_table *tbl, - unsigned long entry) +static void tce_iommu_unuse_page_v2(struct tce_container *container, + struct iommu_table *tbl, unsigned long entry) { struct mm_iommu_table_group_mem_t *mem = NULL; int ret; unsigned long hpa = 0; unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); - if (!pua || !current || !current->mm) + if (!pua) return; - ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl), + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); if (ret) pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", @@ -436,7 +450,7 @@ static int tce_iommu_clear(struct tce_container *container, continue; if (container->v2) { - tce_iommu_unuse_page_v2(tbl, entry); + tce_iommu_unuse_page_v2(container, tbl, entry); continue; } @@ -517,7 +531,7 @@ static long tce_iommu_build_v2(struct tce_container *container, enum dma_data_direction dirtmp; if 
(!tbl->it_userspace) { - ret = tce_iommu_userspace_view_alloc(tbl); + ret = tce_iommu_userspace_view_alloc(tbl, container->mm); if (ret) return ret; } @@ -527,8 +541,8 @@ static long tce_iommu_build_v2(struct tce_container *container, unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i); - ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl), - &hpa, &mem); + ret = tce_iommu_prereg_ua_to_hpa(container, + tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); if (ret) break; @@ -549,7 +563,7 @@ static long tce_iommu_build_v2(struct tce_container *container, ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); if (ret) { /* dirtmp cannot be DMA_NONE here */ - tce_iommu_unuse_page_v2(tbl, entry + i); + tce_iommu_unuse_page_v2(container, tbl, entry + i); pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", __func__, entry << tbl->it_page_shift, tce, ret); @@ -557,7 +571,7 @@ static long tce_iommu_build_v2(struct tce_container *container, } if (dirtmp != DMA_NONE) - tce_iommu_unuse_page_v2(tbl, entry + i); + tce_iommu_unuse_page_v2(container, tbl, entry + i); *pua = tce; @@ -585,7 +599,7 @@ static long tce_iommu_create_table(struct tce_container *container, if (!table_size) return -EINVAL; - ret = try_increment_locked_vm(table_size >> PAGE_SHIFT); + ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); if (ret) return ret; @@ -598,13 +612,14 @@ static long tce_iommu_create_table(struct tce_container *container, return ret; } -static void tce_iommu_free_table(struct iommu_table *tbl) +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl) { unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; - tce_iommu_userspace_view_free(tbl); + tce_iommu_userspace_view_free(tbl, container->mm); tbl->it_ops->free(tbl); - decrement_locked_vm(pages); + decrement_locked_vm(container->mm, pages); } static long tce_iommu_create_window(struct tce_container *container, @@ -667,7 +682,7 @@ unset_exit: table_group = iommu_group_get_iommudata(tcegrp->grp); table_group->ops->unset_window(table_group, num); } - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); return ret; } @@ -705,7 +720,7 @@ static long tce_iommu_remove_window(struct tce_container *container, /* Free table */ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); container->tables[num] = NULL; return 0; @@ -760,7 +775,17 @@ static long tce_iommu_ioctl(void *iommu_data, } return (ret < 0) ? 0 : ret; + } + + /* + * Sanity check to prevent one userspace from manipulating + * another userspace mm. 
+ */ + BUG_ON(!container); + if (container->mm && container->mm != current->mm) + return -EPERM; + switch (cmd) { case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { struct vfio_iommu_spapr_tce_info info; struct tce_iommu_group *tcegrp; @@ -929,6 +954,10 @@ static long tce_iommu_ioctl(void *iommu_data, minsz = offsetofend(struct vfio_iommu_spapr_register_memory, size); + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (copy_from_user(¶m, (void __user *)arg, minsz)) return -EFAULT; @@ -952,6 +981,9 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + if (!container->mm) + return -EPERM; + minsz = offsetofend(struct vfio_iommu_spapr_register_memory, size); @@ -1010,6 +1042,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (!tce_groups_attached(container)) return -ENXIO; @@ -1048,6 +1084,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (!tce_groups_attached(container)) return -ENXIO; @@ -1093,7 +1133,7 @@ static void tce_iommu_release_ownership(struct tce_container *container, continue; tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_userspace_view_free(tbl); + tce_iommu_userspace_view_free(tbl, container->mm); if (tbl->it_map) iommu_release_ownership(tbl); -- cgit From 4b6fad7097f883335b6d9627c883cb7f276d94c9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 30 Nov 2016 17:52:05 +1100 Subject: powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown At the moment the userspace tool is expected to request pinning of the entire guest RAM when VFIO IOMMU SPAPR v2 driver is present. When the userspace process finishes, all the pinned pages need to be put; this is done as a part of the userspace memory context (MM) destruction which happens on the very last mmdrop(). This approach has a problem that a MM of the userspace process may live longer than the userspace process itself as kernel threads use userspace process MMs which was runnning on a CPU where the kernel thread was scheduled to. If this happened, the MM remains referenced until this exact kernel thread wakes up again and releases the very last reference to the MM, on an idle system this can take even hours. This moves preregistered regions tracking from MM to VFIO; insteads of using mm_iommu_table_group_mem_t::used, tce_container::prereg_list is added so each container releases regions which it has pre-registered. This changes the userspace interface to return EBUSY if a memory region is already registered in a container. However it should not have any practical effect as the only userspace tool available now does register memory region once per container anyway. As tce_iommu_register_pages/tce_iommu_unregister_pages are called under container->lock, this does not need additional locking. 
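For illustration, a minimal sketch of the per-container tracking described above. The structure and field names follow the patch below, but the helper tce_iommu_prereg_add() is hypothetical; the actual change open-codes this logic in tce_iommu_register_pages() and tce_iommu_prereg_free().

/* Sketch only: per-container list of preregistered regions */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

static long tce_iommu_prereg_add(struct tce_container *container,
		struct mm_iommu_table_group_mem_t *mem)
{
	struct tce_iommu_prereg *tcemem;

	/* A region may only be registered once per container (EBUSY otherwise) */
	list_for_each_entry(tcemem, &container->prereg_list, next)
		if (tcemem->mem == mem)
			return -EBUSY;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem)
		return -ENOMEM;

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);
	return 0;
}

On container release the list is walked and each entry is dropped via mm_iommu_put(), so the pinned pages are released when the VFIO container is closed rather than waiting for the final mmdrop() of the userspace mm.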
Signed-off-by: Alexey Kardashevskiy Reviewed-by: Nicholas Piggin Acked-by: Alex Williamson Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/mm/mmu_context_book3s64.c | 4 +-- arch/powerpc/mm/mmu_context_iommu.c | 11 ------ drivers/vfio/vfio_iommu_spapr_tce.c | 61 +++++++++++++++++++++++++++++++++- 3 files changed, 61 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index ad8273590975..73bf6e14c3aa 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm) } #endif - void destroy_context(struct mm_struct *mm) { #ifdef CONFIG_SPAPR_TCE_IOMMU - mm_iommu_cleanup(mm); + WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list)); #endif - #ifdef CONFIG_PPC_ICSWX drop_cop(mm->context.acop, mm); kfree(mm->context.cop_lockp); diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 4c6db09e77ad..104bad029ce9 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -365,14 +365,3 @@ void mm_iommu_init(struct mm_struct *mm) { INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list); } - -void mm_iommu_cleanup(struct mm_struct *mm) -{ - struct mm_iommu_table_group_mem_t *mem, *tmp; - - list_for_each_entry_safe(mem, tmp, &mm->context.iommu_group_mem_list, - next) { - list_del_rcu(&mem->next); - mm_iommu_do_free(mem); - } -} diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 4c03c8525c26..c8823578a1b2 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -88,6 +88,15 @@ struct tce_iommu_group { struct iommu_group *grp; }; +/* + * A container needs to remember which preregistered region it has + * referenced to do proper cleanup at the userspace process exit. + */ +struct tce_iommu_prereg { + struct list_head next; + struct mm_iommu_table_group_mem_t *mem; +}; + /* * The container descriptor supports only a single group per container. 
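In outline, the consumer side of this factoring looks like the sketch below; the my_pmu_* names are placeholders, and the real users (power8 and power9) are updated in the hunks that follow.

/* Shared PowerISA v2.07 format group, defined once in isa207-common.c */
extern struct attribute_group isa207_pmu_format_group;

static const struct attribute_group *my_pmu_attr_groups[] = {
	&isa207_pmu_format_group,	/* common config bit-field layout */
	&my_pmu_events_group,		/* chip-specific event name attributes */
	NULL,
};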
* Required by the API as the container is not supplied with the IOMMU group @@ -102,6 +111,7 @@ struct tce_container { struct mm_struct *mm; struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; struct list_head group_list; + struct list_head prereg_list; }; static long tce_iommu_mm_set(struct tce_container *container) @@ -118,10 +128,27 @@ static long tce_iommu_mm_set(struct tce_container *container) return 0; } +static long tce_iommu_prereg_free(struct tce_container *container, + struct tce_iommu_prereg *tcemem) +{ + long ret; + + ret = mm_iommu_put(container->mm, tcemem->mem); + if (ret) + return ret; + + list_del(&tcemem->next); + kfree(tcemem); + + return 0; +} + static long tce_iommu_unregister_pages(struct tce_container *container, __u64 vaddr, __u64 size) { struct mm_iommu_table_group_mem_t *mem; + struct tce_iommu_prereg *tcemem; + bool found = false; if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) return -EINVAL; @@ -130,7 +157,17 @@ static long tce_iommu_unregister_pages(struct tce_container *container, if (!mem) return -ENOENT; - return mm_iommu_put(container->mm, mem); + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) { + found = true; + break; + } + } + + if (!found) + return -ENOENT; + + return tce_iommu_prereg_free(container, tcemem); } static long tce_iommu_register_pages(struct tce_container *container, @@ -138,16 +175,29 @@ static long tce_iommu_register_pages(struct tce_container *container, { long ret = 0; struct mm_iommu_table_group_mem_t *mem = NULL; + struct tce_iommu_prereg *tcemem; unsigned long entries = size >> PAGE_SHIFT; if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || ((vaddr + size) < vaddr)) return -EINVAL; + mem = mm_iommu_find(container->mm, vaddr, entries); + if (mem) { + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) + return -EBUSY; + } + } + ret = mm_iommu_get(container->mm, vaddr, entries, &mem); if (ret) return ret; + tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); + tcemem->mem = mem; + list_add(&tcemem->next, &container->prereg_list); + container->enabled = true; return 0; @@ -334,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg) mutex_init(&container->lock); INIT_LIST_HEAD_RCU(&container->group_list); + INIT_LIST_HEAD_RCU(&container->prereg_list); container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; @@ -372,6 +423,14 @@ static void tce_iommu_release(void *iommu_data) tce_iommu_free_table(container, tbl); } + while (!list_empty(&container->prereg_list)) { + struct tce_iommu_prereg *tcemem; + + tcemem = list_first_entry(&container->prereg_list, + struct tce_iommu_prereg, next); + WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); + } + tce_iommu_disable(container); if (container->mm) mmdrop(container->mm); -- cgit From 60b00025641e2921dcfba4d54b6cf7f0c5903677 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 2 Dec 2016 06:04:59 +0530 Subject: powerpc/perf: factor out the event format field Factor out the format field structure for PowerISA v2.07. 
Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/isa207-common.c | 34 ++++++++++++++++++++++++++++++++++ arch/powerpc/perf/power8-pmu.c | 39 ++++----------------------------------- arch/powerpc/perf/power9-pmu.c | 39 ++++----------------------------------- 3 files changed, 42 insertions(+), 70 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 6143c99f3ec5..2a2040ea5f99 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -12,6 +12,40 @@ */ #include "isa207-common.h" +PMU_FORMAT_ATTR(event, "config:0-49"); +PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); +PMU_FORMAT_ATTR(mark, "config:8"); +PMU_FORMAT_ATTR(combine, "config:11"); +PMU_FORMAT_ATTR(unit, "config:12-15"); +PMU_FORMAT_ATTR(pmc, "config:16-19"); +PMU_FORMAT_ATTR(cache_sel, "config:20-23"); +PMU_FORMAT_ATTR(sample_mode, "config:24-28"); +PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); +PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); +PMU_FORMAT_ATTR(thresh_start, "config:36-39"); +PMU_FORMAT_ATTR(thresh_cmp, "config:40-49"); + +struct attribute *isa207_pmu_format_attr[] = { + &format_attr_event.attr, + &format_attr_pmcxsel.attr, + &format_attr_mark.attr, + &format_attr_combine.attr, + &format_attr_unit.attr, + &format_attr_pmc.attr, + &format_attr_cache_sel.attr, + &format_attr_sample_mode.attr, + &format_attr_thresh_sel.attr, + &format_attr_thresh_stop.attr, + &format_attr_thresh_start.attr, + &format_attr_thresh_cmp.attr, + NULL, +}; + +struct attribute_group isa207_pmu_format_group = { + .name = "format", + .attrs = isa207_pmu_format_attr, +}; + static inline bool event_is_fab_match(u64 event) { /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */ diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index ab830d106ec5..d07186382f3a 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -30,6 +30,9 @@ enum { #define POWER8_MMCRA_IFM2 0x0000000080000000UL #define POWER8_MMCRA_IFM3 0x00000000C0000000UL +/* PowerISA v2.07 format attribute structure*/ +extern struct attribute_group isa207_pmu_format_group; + /* Table of alternatives, sorted by column 0 */ static const unsigned int event_alternatives[][MAX_ALT] = { { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT }, @@ -175,42 +178,8 @@ static struct attribute_group power8_pmu_events_group = { .attrs = power8_events_attr, }; -PMU_FORMAT_ATTR(event, "config:0-49"); -PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); -PMU_FORMAT_ATTR(mark, "config:8"); -PMU_FORMAT_ATTR(combine, "config:11"); -PMU_FORMAT_ATTR(unit, "config:12-15"); -PMU_FORMAT_ATTR(pmc, "config:16-19"); -PMU_FORMAT_ATTR(cache_sel, "config:20-23"); -PMU_FORMAT_ATTR(sample_mode, "config:24-28"); -PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); -PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); -PMU_FORMAT_ATTR(thresh_start, "config:36-39"); -PMU_FORMAT_ATTR(thresh_cmp, "config:40-49"); - -static struct attribute *power8_pmu_format_attr[] = { - &format_attr_event.attr, - &format_attr_pmcxsel.attr, - &format_attr_mark.attr, - &format_attr_combine.attr, - &format_attr_unit.attr, - &format_attr_pmc.attr, - &format_attr_cache_sel.attr, - &format_attr_sample_mode.attr, - &format_attr_thresh_sel.attr, - &format_attr_thresh_stop.attr, - &format_attr_thresh_start.attr, - &format_attr_thresh_cmp.attr, - NULL, -}; - -static struct attribute_group power8_pmu_format_group = { - .name = "format", - .attrs = power8_pmu_format_attr, -}; - static const struct attribute_group 
*power8_pmu_attr_groups[] = { - &power8_pmu_format_group, + &isa207_pmu_format_group, &power8_pmu_events_group, NULL, }; diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 8e9a81967ff8..443511b18bc5 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -31,6 +31,9 @@ enum { #define POWER9_MMCRA_IFM2 0x0000000080000000UL #define POWER9_MMCRA_IFM3 0x00000000C0000000UL +/* PowerISA v2.07 format attribute structure*/ +extern struct attribute_group isa207_pmu_format_group; + GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC); GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); @@ -90,42 +93,8 @@ static struct attribute_group power9_pmu_events_group = { .attrs = power9_events_attr, }; -PMU_FORMAT_ATTR(event, "config:0-49"); -PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); -PMU_FORMAT_ATTR(mark, "config:8"); -PMU_FORMAT_ATTR(combine, "config:11"); -PMU_FORMAT_ATTR(unit, "config:12-15"); -PMU_FORMAT_ATTR(pmc, "config:16-19"); -PMU_FORMAT_ATTR(cache_sel, "config:20-23"); -PMU_FORMAT_ATTR(sample_mode, "config:24-28"); -PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); -PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); -PMU_FORMAT_ATTR(thresh_start, "config:36-39"); -PMU_FORMAT_ATTR(thresh_cmp, "config:40-49"); - -static struct attribute *power9_pmu_format_attr[] = { - &format_attr_event.attr, - &format_attr_pmcxsel.attr, - &format_attr_mark.attr, - &format_attr_combine.attr, - &format_attr_unit.attr, - &format_attr_pmc.attr, - &format_attr_cache_sel.attr, - &format_attr_sample_mode.attr, - &format_attr_thresh_sel.attr, - &format_attr_thresh_stop.attr, - &format_attr_thresh_start.attr, - &format_attr_thresh_cmp.attr, - NULL, -}; - -static struct attribute_group power9_pmu_format_group = { - .name = "format", - .attrs = power9_pmu_format_attr, -}; - static const struct attribute_group *power9_pmu_attr_groups[] = { - &power9_pmu_format_group, + &isa207_pmu_format_group, &power9_pmu_events_group, NULL, }; -- cgit From 520ed5b04f3e807e5bf1b6981ffb50524632fc5f Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 2 Dec 2016 06:05:00 +0530 Subject: powerpc/perf: update attribute_group data structure Rename the power_pmu and attribute_group variables that support PowerISA v2.07. Add a cpu feature flag check to pick the PowerISA v2.07 format structures to support. 
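In outline, the added selection looks like this sketch (a hypothetical standalone snippet; the real change is inside init_power9_pmu() in the hunk below), where CPU_FTR_POWER9_DD1 selects the DD1 variant for which the PowerISA v2.07 style format group is retained:

	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		rc = register_power_pmu(&power9_isa207_pmu);	/* v2.07 format */

The following patch in the series adds a separate power9_pmu with the new raw event format and registers it in the corresponding else branch.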
Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/power9-pmu.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 443511b18bc5..0bdbee132105 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -93,7 +93,7 @@ static struct attribute_group power9_pmu_events_group = { .attrs = power9_events_attr, }; -static const struct attribute_group *power9_pmu_attr_groups[] = { +static const struct attribute_group *power9_isa207_pmu_attr_groups[] = { &isa207_pmu_format_group, &power9_pmu_events_group, NULL, @@ -260,7 +260,7 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { #undef C -static struct power_pmu power9_pmu = { +static struct power_pmu power9_isa207_pmu = { .name = "POWER9", .n_counter = MAX_PMU_COUNTERS, .add_fields = ISA207_ADD_FIELDS, @@ -274,20 +274,23 @@ static struct power_pmu power9_pmu = { .n_generic = ARRAY_SIZE(power9_generic_events), .generic_events = power9_generic_events, .cache_events = &power9_cache_events, - .attr_groups = power9_pmu_attr_groups, + .attr_groups = power9_isa207_pmu_attr_groups, .bhrb_nr = 32, }; static int __init init_power9_pmu(void) { - int rc; + int rc = 0; /* Comes from cpu_specs[] */ if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9")) return -ENODEV; - rc = register_power_pmu(&power9_pmu); + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + rc = register_power_pmu(&power9_isa207_pmu); + } + if (rc) return rc; -- cgit From 18201b204286a1ef478ef52b00ab9f6c5739b4f6 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 2 Dec 2016 06:05:01 +0530 Subject: powerpc/perf: power9 raw event format encoding Patch to update the power9 raw event encoding format information and add support for the same in power9-pmu.c. Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/power9-pmu.c | 134 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 0bdbee132105..346010e8d463 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -15,6 +15,78 @@ #include "isa207-common.h" +/* + * Raw event encoding for Power9: + * + * 60 56 52 48 44 40 36 32 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ] + * | | | | | + * | | *- IFM (Linux) | thresh start/stop OR FAB match -* + * | *- BHRB (Linux) *sm + * *- EBB (Linux) + * + * 28 24 20 16 12 8 4 0 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * [ ] [ sample ] [cache] [ pmc ] [unit ] [] m [ pmcxsel ] + * | | | | | + * | | | | *- mark + * | | *- L1/L2/L3 cache_sel | + * | | | + * | *- sampling mode for marked events *- combine + * | + * *- thresh_sel + * + * Below uses IBM bit numbering. 
+ * + * MMCR1[x:y] = unit (PMCxUNIT) + * MMCR1[24] = pmc1combine[0] + * MMCR1[25] = pmc1combine[1] + * MMCR1[26] = pmc2combine[0] + * MMCR1[27] = pmc2combine[1] + * MMCR1[28] = pmc3combine[0] + * MMCR1[29] = pmc3combine[1] + * MMCR1[30] = pmc4combine[0] + * MMCR1[31] = pmc4combine[1] + * + * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 + * # PM_MRK_FAB_RSP_MATCH + * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 + * # PM_MRK_FAB_RSP_MATCH_CYC + * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * else + * MMCRA[48:55] = thresh_ctl (THRESH START/END) + * + * if thresh_sel: + * MMCRA[45:47] = thresh_sel + * + * if thresh_cmp: + * MMCRA[9:11] = thresh_cmp[0:2] + * MMCRA[12:18] = thresh_cmp[3:9] + * + * if unit == 6 or unit == 7 + * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL) + * else if unit == 8 or unit == 9: + * if cache_sel[0] == 0: # L3 bank + * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0) + * else if cache_sel[0] == 1: + * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1) + * else if cache_sel[1]: # L1 event + * MMCR1[16] = cache_sel[2] + * MMCR1[17] = cache_sel[3] + * + * if mark: + * MMCRA[63] = 1 (SAMPLE_ENABLE) + * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) + * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) + * + * if EBB and BHRB: + * MMCRA[32:33] = IFM + * + * MMCRA[SDAR_MODE] = sm + */ + /* * Some power9 event codes. */ @@ -99,6 +171,48 @@ static const struct attribute_group *power9_isa207_pmu_attr_groups[] = { NULL, }; +PMU_FORMAT_ATTR(event, "config:0-51"); +PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); +PMU_FORMAT_ATTR(mark, "config:8"); +PMU_FORMAT_ATTR(combine, "config:10-11"); +PMU_FORMAT_ATTR(unit, "config:12-15"); +PMU_FORMAT_ATTR(pmc, "config:16-19"); +PMU_FORMAT_ATTR(cache_sel, "config:20-23"); +PMU_FORMAT_ATTR(sample_mode, "config:24-28"); +PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); +PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); +PMU_FORMAT_ATTR(thresh_start, "config:36-39"); +PMU_FORMAT_ATTR(thresh_cmp, "config:40-49"); +PMU_FORMAT_ATTR(sdar_mode, "config:50-51"); + +static struct attribute *power9_pmu_format_attr[] = { + &format_attr_event.attr, + &format_attr_pmcxsel.attr, + &format_attr_mark.attr, + &format_attr_combine.attr, + &format_attr_unit.attr, + &format_attr_pmc.attr, + &format_attr_cache_sel.attr, + &format_attr_sample_mode.attr, + &format_attr_thresh_sel.attr, + &format_attr_thresh_stop.attr, + &format_attr_thresh_start.attr, + &format_attr_thresh_cmp.attr, + &format_attr_sdar_mode.attr, + NULL, +}; + +static struct attribute_group power9_pmu_format_group = { + .name = "format", + .attrs = power9_pmu_format_attr, +}; + +static const struct attribute_group *power9_pmu_attr_groups[] = { + &power9_pmu_format_group, + &power9_pmu_events_group, + NULL, +}; + static int power9_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC, @@ -278,6 +392,24 @@ static struct power_pmu power9_isa207_pmu = { .bhrb_nr = 32, }; +static struct power_pmu power9_pmu = { + .name = "POWER9", + .n_counter = MAX_PMU_COUNTERS, + .add_fields = ISA207_ADD_FIELDS, + .test_adder = ISA207_TEST_ADDER, + .compute_mmcr = isa207_compute_mmcr, + .config_bhrb = power9_config_bhrb, + .bhrb_filter_map = power9_bhrb_filter_map, + .get_constraint = isa207_get_constraint, + .disable_pmc = isa207_disable_pmc, + .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, + .n_generic = ARRAY_SIZE(power9_generic_events), + .generic_events = power9_generic_events, + 
.cache_events = &power9_cache_events, + .attr_groups = power9_pmu_attr_groups, + .bhrb_nr = 32, +}; + static int __init init_power9_pmu(void) { int rc = 0; @@ -289,6 +421,8 @@ static int __init init_power9_pmu(void) if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { rc = register_power_pmu(&power9_isa207_pmu); + } else { + rc = register_power_pmu(&power9_pmu); } if (rc) -- cgit From c7c3f568beff2b72f02a7807ec48b0bc66a7ead6 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 2 Dec 2016 06:05:02 +0530 Subject: powerpc/perf: macros for power9 format encoding Patch to add macros and contants to support the power9 raw event encoding format. Couple of functions added since some of the bits fields like PMCxCOMB and THRESH_CMP has different width and location within MMCR* in power9. Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/isa207-common.c | 57 ++++++++++++++++++++++++++++++++++----- arch/powerpc/perf/isa207-common.h | 30 ++++++++++++++++++++- 2 files changed, 79 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 2a2040ea5f99..50e598cf644b 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -55,6 +55,48 @@ static inline bool event_is_fab_match(u64 event) return (event == 0x30056 || event == 0x4f052); } +static bool is_event_valid(u64 event) +{ + u64 valid_mask = EVENT_VALID_MASK; + + if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) + valid_mask = p9_EVENT_VALID_MASK; + + return !(event & ~valid_mask); +} + +static u64 mmcra_sdar_mode(u64 event) +{ + if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) + return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + + return MMCRA_SDAR_MODE_TLB; +} + +static u64 thresh_cmp_val(u64 value) +{ + if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) + return value << p9_MMCRA_THR_CMP_SHIFT; + + return value << MMCRA_THR_CMP_SHIFT; +} + +static unsigned long combine_from_event(u64 event) +{ + if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) + return p9_EVENT_COMBINE(event); + + return EVENT_COMBINE(event); +} + +static unsigned long combine_shift(unsigned long pmc) +{ + if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) + return p9_MMCR1_COMBINE_SHIFT(pmc); + + return MMCR1_COMBINE_SHIFT(pmc); +} + int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { unsigned int unit, pmc, cache, ebb; @@ -62,7 +104,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) mask = value = 0; - if (event & ~EVENT_VALID_MASK) + if (!is_event_valid(event)) return -1; pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; @@ -189,15 +231,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev, pmc_inuse |= 1 << pmc; } - /* In continuous sampling mode, update SDAR on TLB miss */ - mmcra = MMCRA_SDAR_MODE_TLB; - mmcr1 = mmcr2 = 0; + mmcra = mmcr1 = mmcr2 = 0; /* Second pass: assign PMCs, set all MMCR1 fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; - combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK; + combine = combine_from_event(event[i]); psel = event[i] & EVENT_PSEL_MASK; if (!pmc) { @@ -211,10 +251,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev, if (pmc <= 4) { mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc); - mmcr1 |= combine << 
MMCR1_COMBINE_SHIFT(pmc); + mmcr1 |= combine << combine_shift(pmc); mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc); } + /* In continuous sampling mode, update SDAR on TLB miss */ + mmcra |= mmcra_sdar_mode(event[i]); + if (event[i] & EVENT_IS_L1) { cache = event[i] >> EVENT_CACHE_SEL_SHIFT; mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; @@ -245,7 +288,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; mmcra |= val << MMCRA_THR_SEL_SHIFT; val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; - mmcra |= val << MMCRA_THR_CMP_SHIFT; + mmcra |= thresh_cmp_val(val); } if (event[i] & EVENT_WANTS_BHRB) { diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 4d0a4e5017c2..90495f1580c7 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -107,6 +107,7 @@ #define EVENT_UNIT_MASK 0xf #define EVENT_COMBINE_SHIFT 11 /* Combine bit */ #define EVENT_COMBINE_MASK 0x1 +#define EVENT_COMBINE(v) (((v) >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK) #define EVENT_MARKED_SHIFT 8 /* Marked bit */ #define EVENT_MARKED_MASK 0x1 #define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) @@ -134,6 +135,26 @@ PERF_SAMPLE_BRANCH_KERNEL |\ PERF_SAMPLE_BRANCH_HV) +/* Contants to support power9 raw encoding format */ +#define p9_EVENT_COMBINE_SHIFT 10 /* Combine bit */ +#define p9_EVENT_COMBINE_MASK 0x3ull +#define p9_EVENT_COMBINE(v) (((v) >> p9_EVENT_COMBINE_SHIFT) & p9_EVENT_COMBINE_MASK) +#define p9_SDAR_MODE_SHIFT 50 +#define p9_SDAR_MODE_MASK 0x3ull +#define p9_SDAR_MODE(v) (((v) >> p9_SDAR_MODE_SHIFT) & p9_SDAR_MODE_MASK) + +#define p9_EVENT_VALID_MASK \ + ((p9_SDAR_MODE_MASK << p9_SDAR_MODE_SHIFT | \ + (EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ + (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \ + (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \ + (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \ + (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ + (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT) | \ + (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ + EVENT_LINUX_MASK | \ + EVENT_PSEL_MASK)) + /* * Layout of constraint bits: * @@ -210,15 +231,22 @@ #define MMCR1_DC_QUAL_SHIFT 47 #define MMCR1_IC_QUAL_SHIFT 46 +/* MMCR1 Combine bits macro for power9 */ +#define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2)) + /* Bits in MMCRA for PowerISA v2.07 */ #define MMCRA_SAMP_MODE_SHIFT 1 #define MMCRA_SAMP_ELIG_SHIFT 4 #define MMCRA_THR_CTL_SHIFT 8 #define MMCRA_THR_SEL_SHIFT 16 #define MMCRA_THR_CMP_SHIFT 32 -#define MMCRA_SDAR_MODE_TLB (1ull << 42) +#define MMCRA_SDAR_MODE_SHIFT 42 +#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) #define MMCRA_IFM_SHIFT 30 +/* MMCR1 Threshold Compare bit constant for power9 */ +#define p9_MMCRA_THR_CMP_SHIFT 45 + /* Bits in MMCR2 for PowerISA v2.07 */ #define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9))) #define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9))) -- cgit From c02e0349d7e9eec8a6414840dd34b8d26e0c047b Mon Sep 17 00:00:00 2001 From: Libin Date: Sun, 6 Dec 2015 10:02:56 +0800 Subject: powerpc/ftrace: Fix the comments for ftrace_modify_code There is no need to worry about module and __init text disappearing case, because that ftrace has a module notifier that is called when a module is being unloaded and before the text goes away and this code grabs the ftrace_lock mutex and removes the module functions from the ftrace list, such that it will no longer do any modifications to that module's text, the update to make functions be traced or 
not is done under the ftrace_lock mutex as well. And by now, __init section codes should not been modified by ftrace, because it is black listed in recordmcount.c and ignored by ftrace. Suggested-by: Steven Rostedt Signed-off-by: Li Bin Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/ftrace.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index a95639b8d4ac..5c9f50c1aa99 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -47,13 +47,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) unsigned int replaced; /* - * Note: Due to modules and __init, code can - * disappear and change, we need to protect against faulting - * as well as code changing. We do this by using the - * probe_kernel_* functions. - * - * No real locking needed, this code is run through - * kstop_machine, or before SMP starts. + * Note: + * We are paranoid about modifying text, as if a bug was to happen, it + * could cause us to read or write to someplace that could cause harm. + * Carefully read and modify the code with probe_kernel_*(), and make + * sure what we read is what we expected it to be before modifying it. */ /* read the text we want to modify */ -- cgit From 53a46e8188e0782e84090bd35c01d506d9ffcfaf Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Nov 2016 23:00:45 +0800 Subject: powerpc/83xx/suspend: use builtin_platform_driver Use builtin_platform_driver() helper to simplify the code. Signed-off-by: Geliang Tang Signed-off-by: Scott Wood --- arch/powerpc/platforms/83xx/suspend.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 24717d060008..08f92f6ed228 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = { .remove = pmc_remove }; -static int pmc_init(void) -{ - return platform_driver_register(&pmc_driver); -} -device_initcall(pmc_init); +builtin_platform_driver(pmc_driver); -- cgit From 16c6cb46a7716c49f5dd44b5a1ccbaa74900eb89 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Nov 2016 23:02:35 +0800 Subject: powerpc/fsl_pmc: use builtin_platform_driver Use builtin_platform_driver() helper to simplify the code. Signed-off-by: Geliang Tang Signed-off-by: Scott Wood --- arch/powerpc/sysdev/fsl_pmc.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 1d6fd7c59fe9..232225e7f863 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = { .probe = pmc_probe, }; -static int __init pmc_init(void) -{ - return platform_driver_register(&pmc_driver); -} -device_initcall(pmc_init); +builtin_platform_driver(pmc_driver); -- cgit From c9492b4bea2630dacd18e16f8e8b0dcf91a6edcf Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Nov 2016 23:04:21 +0800 Subject: soc/fsl/qe: use builtin_platform_driver Use builtin_platform_driver() helper to simplify the code. 
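For context, builtin_platform_driver() is a helper from <linux/platform_device.h> intended for drivers that can never be built as modules; to a first approximation the single line

	builtin_platform_driver(qe_driver);

expands to the boilerplate it replaces:

	static int __init qe_driver_init(void)
	{
		return platform_driver_register(&qe_driver);
	}
	device_initcall(qe_driver_init);

so this and the two preceding conversions are behaviour-preserving: the generated initcall name differs from the hand-written one, but registration still happens at device_initcall time.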
Signed-off-by: Geliang Tang Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/qe.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 2707a827261b..ade168f5328e 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -717,9 +717,5 @@ static struct platform_driver qe_driver = { .resume = qe_resume, }; -static int __init qe_drv_init(void) -{ - return platform_driver_register(&qe_driver); -} -device_initcall(qe_drv_init); +builtin_platform_driver(qe_driver); #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ -- cgit From d7544424169e0ba1ffdc39edbdfa822711a59c5f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 17 Oct 2016 15:13:59 +0000 Subject: soc/fsl/bman: Use resource_size instead of computation Use resource_size function on resource object instead of explicit computation. Generated by: scripts/coccinelle/api/resource_size.cocci Signed-off-by: Wei Yongjun Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/bman_ccsr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index 9deb0524543f..a8e8389a6894 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c @@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev) node->full_name); return -ENXIO; } - bm_ccsr_start = devm_ioremap(dev, res->start, - res->end - res->start + 1); + bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); if (!bm_ccsr_start) return -ENXIO; -- cgit From ff45000fcb56b5b0f1a14a865d3541746d838a0a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 28 Nov 2016 12:42:26 +1100 Subject: powerpc/boot: Request no dynamic linker for boot wrapper The boot wrapper performs its own relocations and does not require PT_INTERP segment. However currently we don't tell the linker that. Prior to binutils 2.28 that works OK. But since binutils commit 1a9ccd70f9a7 ("Fix the linker so that it will not silently generate ELF binaries with invalid program headers. Fix readelf to report such invalid binaries.") binutils tries to create a program header segment due to PT_INTERP, and the link fails because there is no space for it: ld: arch/powerpc/boot/zImage.pseries: Not enough room for program headers, try linking with -N ld: final link failed: Bad value So tell the linker not to do that, by passing --no-dynamic-linker. Cc: stable@vger.kernel.org Reported-by: Anton Blanchard Signed-off-by: Nicholas Piggin [mpe: Drop dependency on ld-version.sh and massage change log] Signed-off-by: Michael Ellerman --- arch/powerpc/boot/wrapper | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 404b3aabdb4d..76fe3ccfd381 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -181,6 +181,28 @@ case "$elfformat" in elf32-powerpc) format=elf32ppc ;; esac +ld_version() +{ + # Poached from scripts/ld-version.sh, but we don't want to call that because + # this script (wrapper) is distributed separately from the kernel source. + # Extract linker version number from stdin and turn into single number. + awk '{ + gsub(".*\\)", ""); + gsub(".*version ", ""); + gsub("-.*", ""); + split($1,a, "."); + print a[1]*100000000 + a[2]*1000000 + a[3]*10000; + exit + }' +} + +# Do not include PT_INTERP segment when linking pie. Non-pie linking +# just ignores this option. 
+LD_VERSION=$(${CROSS}ld --version | ld_version) +LD_NO_DL_MIN_VERSION=$(echo 2.26 | ld_version) +if [ "$LD_VERSION" -ge "$LD_NO_DL_MIN_VERSION" ] ; then + nodl="--no-dynamic-linker" +fi platformo=$object/"$platform".o lds=$object/zImage.lds @@ -446,7 +468,7 @@ if [ "$platform" != "miboot" ]; then text_start="-Ttext $link_address" fi #link everything - ${CROSS}ld -m $format -T $lds $text_start $pie -o "$ofile" \ + ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \ $platformo $tmp $object/wrapper.a rm $tmp fi -- cgit From 9b081e10805cd8e356f30ded1cb2008d67af26c9 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Dec 2016 08:47:24 +0100 Subject: powerpc: port 64 bits pgtable_cache to 32 bits Today powerpc64 uses a set of pgtable_caches while powerpc32 uses standard pages when using 4k pages and a single pgtable_cache if using other size pages. In preparation of implementing huge pages on the 8xx, this patch replaces the specific powerpc32 handling by the 64 bits approach. This is done by: * moving 64 bits pgtable_cache_add() and pgtable_cache_init() in a new file called init-common.c * modifying pgtable_cache_init() to also handle the case without PMD * removing the 32 bits version of pgtable_cache_add() and pgtable_cache_init() * copying related header contents from 64 bits into both the book3s/32 and nohash/32 header files On the 8xx, the following cache sizes will be used: * 4k pages mode: - PGT_CACHE(10) for PGD - PGT_CACHE(3) for 512k hugepage tables * 16k pages mode: - PGT_CACHE(6) for PGD - PGT_CACHE(7) for 512k hugepage tables - PGT_CACHE(3) for 8M hugepage tables Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V Signed-off-by: Scott Wood --- arch/powerpc/include/asm/book3s/32/pgalloc.h | 44 +++++++++-- arch/powerpc/include/asm/book3s/32/pgtable.h | 40 +++++----- arch/powerpc/include/asm/book3s/64/pgtable.h | 3 - arch/powerpc/include/asm/nohash/32/pgalloc.h | 44 +++++++++-- arch/powerpc/include/asm/nohash/32/pgtable.h | 42 +++++------ arch/powerpc/include/asm/nohash/64/pgtable.h | 2 - arch/powerpc/include/asm/pgtable.h | 2 + arch/powerpc/mm/Makefile | 3 +- arch/powerpc/mm/init-common.c | 107 +++++++++++++++++++++++++++ arch/powerpc/mm/init_64.c | 77 ------------------- arch/powerpc/mm/pgtable_32.c | 37 --------- 11 files changed, 227 insertions(+), 174 deletions(-) create mode 100644 arch/powerpc/mm/init-common.c diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index 8e21bb492dca..d310546e5d9d 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -2,14 +2,42 @@ #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H #include +#include -/* For 32-bit, all levels of page tables are just drawn from get_free_page() */ -#define MAX_PGTABLE_INDEX_SIZE 0 +/* + * Functions that deal with pagetables that could be at any level of + * the table need to be passed an "index_size" so they know how to + * handle allocation. For PTE pages (which are linked to a struct + * page for now, and drawn from the main get_free_pages() pool), the + * allocation size will be (2^index_size * sizeof(pointer)) and + * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). + * + * The maximum index size needs to be big enough to allow any + * pagetable sizes we need, but small enough to fit in the low bits of + * any page table pointer. In other words all pagetables, even tiny + * ones, must be aligned to allow at least enough low 0 bits to + * contain this value. 
This value is also used as a mask, so it must + * be one less than a power of two. + */ +#define MAX_PGTABLE_INDEX_SIZE 0xf extern void __bad_pte(pmd_t *pmd); -extern pgd_t *pgd_alloc(struct mm_struct *mm); -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); +extern struct kmem_cache *pgtable_cache[]; +#define PGT_CACHE(shift) ({ \ + BUG_ON(!(shift)); \ + pgtable_cache[(shift) - 1]; \ + }) + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); +} /* * We don't have any real pmd's, and this code never triggers because @@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) static inline void pgtable_free(void *table, unsigned index_size) { - BUG_ON(index_size); /* 32-bit doesn't use this */ - free_page((unsigned long)table); + if (!index_size) { + free_page((unsigned long)table); + } else { + BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); + kmem_cache_free(PGT_CACHE(index_size), table); + } } #define check_pgt_cache() do { } while (0) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 6b8b2d57fdc8..388b0522f748 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -8,6 +8,23 @@ /* And here we include common definitions */ #include +#define PTE_INDEX_SIZE PTE_SHIFT +#define PMD_INDEX_SIZE 0 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) + +#define PMD_CACHE_INDEX PMD_INDEX_SIZE + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE 0 +#define PUD_TABLE_SIZE 0 +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + /* * The normal case is that PTEs are 32-bits and we have a 1-page * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus @@ -19,14 +36,10 @@ * -Matt */ /* PGDIR_SHIFT determines what a top-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) +#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define PTRS_PER_PTE (1 << PTE_SHIFT) -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) - #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) /* * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary @@ -82,12 +95,8 @@ extern unsigned long ioremap_bot; -/* - * entries per page directory level: our page-table tree is two-level, so - * we don't really have any PMD directory. 
- */ -#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) -#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ @@ -283,15 +292,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -#ifndef CONFIG_PPC_4K_PAGES -void pgtable_cache_init(void); -#else -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) -#endif - extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 9fd77f8794a0..0a46a5f2a739 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -789,9 +789,6 @@ extern struct page *pgd_page(pgd_t pgd); #define pgd_ERROR(e) \ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) -void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); -void pgtable_cache_init(void); - static inline int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) { diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 76d6b9e0c8a9..633139291a48 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -2,14 +2,42 @@ #define _ASM_POWERPC_PGALLOC_32_H #include +#include -/* For 32-bit, all levels of page tables are just drawn from get_free_page() */ -#define MAX_PGTABLE_INDEX_SIZE 0 +/* + * Functions that deal with pagetables that could be at any level of + * the table need to be passed an "index_size" so they know how to + * handle allocation. For PTE pages (which are linked to a struct + * page for now, and drawn from the main get_free_pages() pool), the + * allocation size will be (2^index_size * sizeof(pointer)) and + * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). + * + * The maximum index size needs to be big enough to allow any + * pagetable sizes we need, but small enough to fit in the low bits of + * any page table pointer. In other words all pagetables, even tiny + * ones, must be aligned to allow at least enough low 0 bits to + * contain this value. This value is also used as a mask, so it must + * be one less than a power of two. 
+ */ +#define MAX_PGTABLE_INDEX_SIZE 0xf extern void __bad_pte(pmd_t *pmd); -extern pgd_t *pgd_alloc(struct mm_struct *mm); -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); +extern struct kmem_cache *pgtable_cache[]; +#define PGT_CACHE(shift) ({ \ + BUG_ON(!(shift)); \ + pgtable_cache[(shift) - 1]; \ + }) + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); +} /* * We don't have any real pmd's, and this code never triggers because @@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) static inline void pgtable_free(void *table, unsigned index_size) { - BUG_ON(index_size); /* 32-bit doesn't use this */ - free_page((unsigned long)table); + if (!index_size) { + free_page((unsigned long)table); + } else { + BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); + kmem_cache_free(PGT_CACHE(index_size), table); + } } #define check_pgt_cache() do { } while (0) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index c219ef7be53b..7bd916e91295 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -16,6 +16,23 @@ extern int icache_44x_need_flush; #endif /* __ASSEMBLY__ */ +#define PTE_INDEX_SIZE PTE_SHIFT +#define PMD_INDEX_SIZE 0 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) + +#define PMD_CACHE_INDEX PMD_INDEX_SIZE + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE 0 +#define PUD_TABLE_SIZE 0 +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + /* * The normal case is that PTEs are 32-bits and we have a 1-page * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus @@ -27,22 +44,12 @@ extern int icache_44x_need_flush; * -Matt */ /* PGDIR_SHIFT determines what a top-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) +#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -/* - * entries per page directory level: our page-table tree is two-level, so - * we don't really have any PMD directory. 
- */ -#ifndef __ASSEMBLY__ -#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) -#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) -#endif /* __ASSEMBLY__ */ - -#define PTRS_PER_PTE (1 << PTE_SHIFT) -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) #define FIRST_USER_ADDRESS 0UL @@ -328,15 +335,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -#ifndef CONFIG_PPC_4K_PAGES -void pgtable_cache_init(void); -#else -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) -#endif - extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp); diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index e9e540a804fc..6c4a14292a9e 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -346,8 +346,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __swp_entry_to_pte(x) __pte((x).val) -void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); -void pgtable_cache_init(void); extern int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags); extern int __meminit vmemmap_create_mapping(unsigned long start, diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 9bd87f269d6d..dd01212935ac 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned long vmalloc_to_phys(void *vmalloc_addr); +void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); +void pgtable_cache_init(void); #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index f4ffe1f68ce9..3f4d338985fc 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -7,7 +7,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y := fault.o mem.o pgtable.o mmap.o \ - init_$(BITS).o pgtable_$(BITS).o + init_$(BITS).o pgtable_$(BITS).o \ + init-common.o obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ tlb_nohash_low.o obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c new file mode 100644 index 000000000000..a175cd82ae8c --- /dev/null +++ b/arch/powerpc/mm/init-common.c @@ -0,0 +1,107 @@ +/* + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * Dave Engebretsen + * Rework for PPC64 port. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#undef DEBUG + +#include +#include +#include + +static void pgd_ctor(void *addr) +{ + memset(addr, 0, PGD_TABLE_SIZE); +} + +static void pud_ctor(void *addr) +{ + memset(addr, 0, PUD_TABLE_SIZE); +} + +static void pmd_ctor(void *addr) +{ + memset(addr, 0, PMD_TABLE_SIZE); +} + +struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; + +/* + * Create a kmem_cache() for pagetables. This is not used for PTE + * pages - they're linked to struct page, come from the normal free + * pages pool and have a different entry size (see real_pte_t) to + * everything else. Caches created by this function are used for all + * the higher level pagetables, and for hugepage pagetables. + */ +void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) +{ + char *name; + unsigned long table_size = sizeof(void *) << shift; + unsigned long align = table_size; + + /* When batching pgtable pointers for RCU freeing, we store + * the index size in the low bits. Table alignment must be + * big enough to fit it. + * + * Likewise, hugeapge pagetable pointers contain a (different) + * shift value in the low bits. All tables must be aligned so + * as to leave enough 0 bits in the address to contain it. */ + unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, + HUGEPD_SHIFT_MASK + 1); + struct kmem_cache *new; + + /* It would be nice if this was a BUILD_BUG_ON(), but at the + * moment, gcc doesn't seem to recognize is_power_of_2 as a + * constant expression, so so much for that. */ + BUG_ON(!is_power_of_2(minalign)); + BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); + + if (PGT_CACHE(shift)) + return; /* Already have a cache of this size */ + + align = max_t(unsigned long, align, minalign); + name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); + new = kmem_cache_create(name, table_size, align, 0, ctor); + kfree(name); + pgtable_cache[shift - 1] = new; + pr_debug("Allocated pgtable cache for order %d\n", shift); +} + + +void pgtable_cache_init(void) +{ + pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); + + if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) + pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); + /* + * In all current configs, when the PUD index exists it's the + * same size as either the pgd or pmd index except with THP enabled + * on book3s 64 + */ + if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) + pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); + + if (!PGT_CACHE(PGD_INDEX_SIZE)) + panic("Couldn't allocate pgd cache"); + if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) + panic("Couldn't allocate pmd pgtable caches"); + if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) + panic("Couldn't allocate pud pgtable caches"); +} diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 16ada1eb7e26..a000c3585390 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr); phys_addr_t kernstart_addr; EXPORT_SYMBOL_GPL(kernstart_addr); -static void pgd_ctor(void *addr) -{ - memset(addr, 0, PGD_TABLE_SIZE); -} - -static void pud_ctor(void *addr) -{ - memset(addr, 0, PUD_TABLE_SIZE); -} - -static void pmd_ctor(void *addr) -{ - memset(addr, 0, PMD_TABLE_SIZE); -} - -struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; - -/* - * Create a kmem_cache() for pagetables. This is not used for PTE - * pages - they're linked to struct page, come from the normal free - * pages pool and have a different entry size (see real_pte_t) to - * everything else. 
Caches created by this function are used for all - * the higher level pagetables, and for hugepage pagetables. - */ -void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) -{ - char *name; - unsigned long table_size = sizeof(void *) << shift; - unsigned long align = table_size; - - /* When batching pgtable pointers for RCU freeing, we store - * the index size in the low bits. Table alignment must be - * big enough to fit it. - * - * Likewise, hugeapge pagetable pointers contain a (different) - * shift value in the low bits. All tables must be aligned so - * as to leave enough 0 bits in the address to contain it. */ - unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, - HUGEPD_SHIFT_MASK + 1); - struct kmem_cache *new; - - /* It would be nice if this was a BUILD_BUG_ON(), but at the - * moment, gcc doesn't seem to recognize is_power_of_2 as a - * constant expression, so so much for that. */ - BUG_ON(!is_power_of_2(minalign)); - BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); - - if (PGT_CACHE(shift)) - return; /* Already have a cache of this size */ - - align = max_t(unsigned long, align, minalign); - name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); - new = kmem_cache_create(name, table_size, align, 0, ctor); - kfree(name); - pgtable_cache[shift - 1] = new; - pr_debug("Allocated pgtable cache for order %d\n", shift); -} - - -void pgtable_cache_init(void) -{ - pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); - pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); - /* - * In all current configs, when the PUD index exists it's the - * same size as either the pgd or pmd index except with THP enabled - * on book3s 64 - */ - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) - pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); - - if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX)) - panic("Couldn't allocate pgtable caches"); - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) - panic("Couldn't allocate pud pgtable caches"); -} - #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Given an address within the vmemmap, determine the pfn of the page that diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 0ae0572bc239..a65c0b4c0669 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ extern char etext[], _stext[], _sinittext[], _einittext[]; -#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) - -#ifndef CONFIG_PPC_4K_PAGES -static struct kmem_cache *pgtable_cache; - -void pgtable_cache_init(void) -{ - pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER, - 1 << PGDIR_ORDER, 0, NULL); - if (pgtable_cache == NULL) - panic("Couldn't allocate pgtable caches"); -} -#endif - -pgd_t *pgd_alloc(struct mm_struct *mm) -{ - pgd_t *ret; - - /* pgdir take page or two with 4K pages and a page fraction otherwise */ -#ifndef CONFIG_PPC_4K_PAGES - ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO); -#else - ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, - PGDIR_ORDER - PAGE_SHIFT); -#endif - return ret; -} - -void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ -#ifndef CONFIG_PPC_4K_PAGES - kmem_cache_free(pgtable_cache, (void *)pgd); -#else - free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); -#endif -} - __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { pte_t *pte; -- cgit From 03bb2d65900c87a6cc860310b4d598c68fb83393 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Dec 2016 08:47:26 +0100 Subject: powerpc: get 
hugetlbpage handling more generic Today there are two implementations of hugetlbpages which are managed by exclusive #ifdefs: * FSL_BOOKE: several directory entries points to the same single hugepage * BOOK3S: one upper level directory entry points to a table of hugepages In preparation of implementation of hugepage support on the 8xx, we need a mix of the two above solutions, because the 8xx needs both cases depending on the size of pages: * In 4k page size mode, each PGD entry covers a 4M bytes area. It means that 2 PGD entries will be necessary to cover an 8M hugepage while a single PGD entry will cover 8x 512k hugepages. * In 16 page size mode, each PGD entry covers a 64M bytes area. It means that 8x 8M hugepages will be covered by one PGD entry and 64x 512k hugepages will be covers by one PGD entry. This patch: * removes #ifdefs in favor of if/else based on the range sizes * merges the two huge_pte_alloc() functions as they are pretty similar * merges the two hugetlbpage_init() functions as they are pretty similar Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V (v3) Signed-off-by: Scott Wood --- arch/powerpc/mm/hugetlbpage.c | 195 ++++++++++++++++++------------------------ 1 file changed, 81 insertions(+), 114 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index a5d3ecdabc44..53245aa00e22 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -64,14 +64,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, { struct kmem_cache *cachep; pte_t *new; - -#ifdef CONFIG_PPC_FSL_BOOK3E int i; - int num_hugepd = 1 << (pshift - pdshift); - cachep = hugepte_cache; -#else - cachep = PGT_CACHE(pdshift - pshift); -#endif + int num_hugepd; + + if (pshift >= pdshift) { + cachep = hugepte_cache; + num_hugepd = 1 << (pshift - pdshift); + } else { + cachep = PGT_CACHE(pdshift - pshift); + num_hugepd = 1; + } new = kmem_cache_zalloc(cachep, GFP_KERNEL); @@ -89,7 +91,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, smp_wmb(); spin_lock(&mm->page_table_lock); -#ifdef CONFIG_PPC_FSL_BOOK3E + /* * We have multiple higher-level entries that point to the same * actual pte location. Fill in each as we go and backtrack on error. @@ -100,8 +102,13 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (unlikely(!hugepd_none(*hpdp))) break; else +#ifdef CONFIG_PPC_BOOK3S_64 + hpdp->pd = __pa(new) | + (shift_to_mmu_psize(pshift) << 2); +#else /* We use the old format for PPC_FSL_BOOK3E */ hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; +#endif } /* If we bailed from the for loop early, an error occurred, clean up */ if (i < num_hugepd) { @@ -109,17 +116,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, hpdp->pd = 0; kmem_cache_free(cachep, new); } -#else - if (!hugepd_none(*hpdp)) - kmem_cache_free(cachep, new); - else { -#ifdef CONFIG_PPC_BOOK3S_64 - hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2); -#else - hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; -#endif - } -#endif spin_unlock(&mm->page_table_lock); return 0; } @@ -136,7 +132,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, #define HUGEPD_PUD_SHIFT PMD_SHIFT #endif -#ifdef CONFIG_PPC_BOOK3S_64 /* * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. 
@@ -153,6 +148,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz addr &= ~(sz-1); pg = pgd_offset(mm, addr); +#ifdef CONFIG_PPC_BOOK3S_64 if (pshift == PGDIR_SHIFT) /* 16GB huge page */ return (pte_t *) pg; @@ -178,32 +174,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz hpdp = (hugepd_t *)pm; } } - if (!hpdp) - return NULL; - - BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); - - if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) - return NULL; - - return hugepte_offset(*hpdp, addr, pdshift); -} - #else - -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) -{ - pgd_t *pg; - pud_t *pu; - pmd_t *pm; - hugepd_t *hpdp = NULL; - unsigned pshift = __ffs(sz); - unsigned pdshift = PGDIR_SHIFT; - - addr &= ~(sz-1); - - pg = pgd_offset(mm, addr); - if (pshift >= HUGEPD_PGD_SHIFT) { hpdp = (hugepd_t *)pg; } else { @@ -217,7 +188,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz hpdp = (hugepd_t *)pm; } } - +#endif if (!hpdp) return NULL; @@ -228,7 +199,6 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz return hugepte_offset(*hpdp, addr, pdshift); } -#endif #ifdef CONFIG_PPC_FSL_BOOK3E /* Build list of addresses of gigantic pages. This function is used in early @@ -310,7 +280,11 @@ static int __init do_gpage_early_setup(char *param, char *val, npages = 0; if (npages > MAX_NUMBER_GPAGES) { pr_warn("MMU: %lu pages requested for page " +#ifdef CONFIG_PHYS_ADDR_T_64BIT "size %llu KB, limiting to " +#else + "size %u KB, limiting to " +#endif __stringify(MAX_NUMBER_GPAGES) "\n", npages, size / 1024); npages = MAX_NUMBER_GPAGES; @@ -442,6 +416,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) } put_cpu_var(hugepd_freelist_cur); } +#else +static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {} #endif static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, @@ -453,13 +429,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned int num_hugepd = 1; + unsigned int shift = hugepd_shift(*hpdp); -#ifdef CONFIG_PPC_FSL_BOOK3E /* Note: On fsl the hpdp may be the first of several */ - num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); -#else - unsigned int shift = hugepd_shift(*hpdp); -#endif + if (shift > pdshift) + num_hugepd = 1 << (shift - pdshift); start &= pdmask; if (start < floor) @@ -475,11 +449,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif for (i = 0; i < num_hugepd; i++, hpdp++) hpdp->pd = 0; -#ifdef CONFIG_PPC_FSL_BOOK3E - hugepd_free(tlb, hugepte); -#else - pgtable_free_tlb(tlb, hugepte, pdshift - shift); -#endif + if (shift >= pdshift) + hugepd_free(tlb, hugepte); + else + pgtable_free_tlb(tlb, hugepte, pdshift - shift); } static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, @@ -492,6 +465,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, start = addr; do { + unsigned long more; + pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { @@ -502,15 +477,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, WARN_ON(!pmd_none_or_clear_bad(pmd)); continue; } -#ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single 
hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ - next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); -#endif + more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); + if (more > next) + next = more; + free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, addr, next, floor, ceiling); } while (addr = next, addr != end); @@ -550,15 +526,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } else { -#ifdef CONFIG_PPC_FSL_BOOK3E + unsigned long more; /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ - next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); -#endif + more = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); + if (more > next) + next = more; + free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, addr, next, floor, ceiling); } @@ -615,15 +593,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, continue; hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); } else { -#ifdef CONFIG_PPC_FSL_BOOK3E + unsigned long more; /* * Increment next by the size of the huge mapping since * there may be more than one entry at the pgd level * for a single hugepage, but all of them point to the * same kmem cache that holds the hugepte. */ - next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); -#endif + more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); + if (more > next) + next = more; + free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, addr, next, floor, ceiling); } @@ -753,12 +733,13 @@ static int __init add_huge_page_size(unsigned long long size) /* Check that it is a page size supported by the hardware and * that it fits within pagetable and slice limits. */ + if (size <= PAGE_SIZE) + return -EINVAL; #ifdef CONFIG_PPC_FSL_BOOK3E - if ((size < PAGE_SIZE) || !is_power_of_4(size)) + if (!is_power_of_4(size)) return -EINVAL; #else - if (!is_power_of_2(size) - || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) + if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT)) return -EINVAL; #endif @@ -791,53 +772,15 @@ static int __init hugepage_setup_sz(char *str) } __setup("hugepagesz=", hugepage_setup_sz); -#ifdef CONFIG_PPC_FSL_BOOK3E struct kmem_cache *hugepte_cache; static int __init hugetlbpage_init(void) { int psize; - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - unsigned shift; - - if (!mmu_psize_defs[psize].shift) - continue; - - shift = mmu_psize_to_shift(psize); - - /* Don't treat normal page sizes as huge... */ - if (shift != PAGE_SHIFT) - if (add_huge_page_size(1ULL << shift) < 0) - continue; - } - - /* - * Create a kmem cache for hugeptes. 
The bottom bits in the pte have - * size information encoded in them, so align them to allow this - */ - hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t), - HUGEPD_SHIFT_MASK + 1, 0, NULL); - if (hugepte_cache == NULL) - panic("%s: Unable to create kmem cache for hugeptes\n", - __func__); - - /* Default hpage size = 4M */ - if (mmu_psize_defs[MMU_PAGE_4M].shift) - HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; - else - panic("%s: Unable to set default huge page size\n", __func__); - - - return 0; -} -#else -static int __init hugetlbpage_init(void) -{ - int psize; - +#if !defined(CONFIG_PPC_FSL_BOOK3E) if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; - +#endif for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { unsigned shift; unsigned pdshift; @@ -850,9 +793,9 @@ static int __init hugetlbpage_init(void) if (add_huge_page_size(1ULL << shift) < 0) continue; - if (shift < PMD_SHIFT) + if (shift < HUGEPD_PUD_SHIFT) pdshift = PMD_SHIFT; - else if (shift < PUD_SHIFT) + else if (shift < HUGEPD_PGD_SHIFT) pdshift = PUD_SHIFT; else pdshift = PGDIR_SHIFT; @@ -860,14 +803,36 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. */ - if (pdshift != shift) { + if (pdshift > shift) { pgtable_cache_add(pdshift - shift, NULL); if (!PGT_CACHE(pdshift - shift)) panic("hugetlbpage_init(): could not create " "pgtable cache for %d bit pagesize\n", shift); } +#ifdef CONFIG_PPC_FSL_BOOK3E + else if (!hugepte_cache) { + /* + * Create a kmem cache for hugeptes. The bottom bits in + * the pte have size information encoded in them, so + * align them to allow this + */ + hugepte_cache = kmem_cache_create("hugepte-cache", + sizeof(pte_t), + HUGEPD_SHIFT_MASK + 1, + 0, NULL); + if (hugepte_cache == NULL) + panic("%s: Unable to create kmem cache " + "for hugeptes\n", __func__); + + } +#endif } +#ifdef CONFIG_PPC_FSL_BOOK3E + /* Default hpage size = 4M */ + if (mmu_psize_defs[MMU_PAGE_4M].shift) + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; +#else /* Set default large page size. Currently, we pick 16M or 1M * depending on what is available */ @@ -877,11 +842,13 @@ static int __init hugetlbpage_init(void) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; else if (mmu_psize_defs[MMU_PAGE_2M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; - +#endif + else + panic("%s: Unable to set default huge page size\n", __func__); return 0; } -#endif + arch_initcall(hugetlbpage_init); void flush_dcache_icache_hugepage(struct page *page) -- cgit From 4b91428699477532ab1255c2dd5819713e9e8985 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Dec 2016 08:47:28 +0100 Subject: powerpc/8xx: Implement support of hugepages 8xx uses a two level page table with two different linux page size support (4k and 16k). 8xx also support two different hugepage sizes 512k and 8M. In order to support them on linux we define two different page table layout. The size of pages is in the PGD entry, using PS field (bits 28-29): 00 : Small pages (4k or 16k) 01 : 512k pages 10 : reserved 11 : 8M pages For 512K hugepage size a pgd entry have the below format [0101] . The hugepte table allocated will contain 8 entries pointing to 512K huge pte in 4k pages mode and 64 entries in 16k pages mode. For 8M in 16k mode, a pgd entry have the below format [1101] . The hugepte table allocated will contain 8 entries pointing to 8M huge pte. 
For 8M in 4k mode, multiple pgd entries point to the same hugepte address and pgd entry will have the below format [1101]. The hugepte table allocated will only have one entry. For the time being, we do not support CPU15 ERRATA when HUGETLB is selected Signed-off-by: Christophe Leroy Reviewed-by: Aneesh Kumar K.V (v3, for the generic bits) Signed-off-by: Scott Wood --- arch/powerpc/include/asm/hugetlb.h | 19 ++++- arch/powerpc/include/asm/mmu-8xx.h | 35 ++++++++ arch/powerpc/include/asm/mmu.h | 23 +++--- arch/powerpc/include/asm/nohash/32/pte-8xx.h | 1 + arch/powerpc/include/asm/nohash/pgtable.h | 4 + arch/powerpc/include/asm/reg_8xx.h | 2 +- arch/powerpc/kernel/head_8xx.S | 119 +++++++++++++++++++++++++-- arch/powerpc/mm/hugetlbpage.c | 29 ++++--- arch/powerpc/mm/tlb_nohash.c | 21 ++++- arch/powerpc/platforms/8xx/Kconfig | 1 + arch/powerpc/platforms/Kconfig.cputype | 1 + 11 files changed, 225 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index c5517f463ec7..3facdd41709c 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma, static inline pte_t *hugepd_page(hugepd_t hpd) { BUG_ON(!hugepd_ok(hpd)); +#ifdef CONFIG_PPC_8xx + return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); +#else return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); +#endif } static inline unsigned int hugepd_shift(hugepd_t hpd) { +#ifdef CONFIG_PPC_8xx + return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; +#else return hpd.pd & HUGEPD_SHIFT_MASK; +#endif } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte); +#ifdef CONFIG_PPC_8xx +static inline void flush_hugetlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + flush_tlb_page(vma, vmaddr); +} +#else void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +#endif void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, @@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, * are reserved early in the boot process by memblock instead of via * the .dts as on IBM platforms. 
*/ -#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \ + defined(CONFIG_PPC_8xx)) extern void __init reserve_hugetlb_gpages(void); #else static inline void reserve_hugetlb_gpages(void) diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 3e0e4927811c..798b5bf91427 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -172,6 +172,41 @@ typedef struct { #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) + +/* Page size definitions, common between 32 and 64-bit + * + * shift : is the "PAGE_SHIFT" value for that page size + * penc : is the pte encoding mask + * + */ +struct mmu_psize_def { + unsigned int shift; /* number of bits */ + unsigned int enc; /* PTE encoding */ + unsigned int ind; /* Corresponding indirect page size shift */ + unsigned int flags; +#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ +#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ +}; + +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; + +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} + #endif /* !__ASSEMBLY__ */ #if defined(CONFIG_PPC_4K_PAGES) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index e88368354e49..b119bdd6ed27 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -264,19 +264,20 @@ static inline bool early_radix_enabled(void) #define MMU_PAGE_64K 2 #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ #define MMU_PAGE_256K 4 -#define MMU_PAGE_1M 5 -#define MMU_PAGE_2M 6 -#define MMU_PAGE_4M 7 -#define MMU_PAGE_8M 8 -#define MMU_PAGE_16M 9 -#define MMU_PAGE_64M 10 -#define MMU_PAGE_256M 11 -#define MMU_PAGE_1G 12 -#define MMU_PAGE_16G 13 -#define MMU_PAGE_64G 14 +#define MMU_PAGE_512K 5 +#define MMU_PAGE_1M 6 +#define MMU_PAGE_2M 7 +#define MMU_PAGE_4M 8 +#define MMU_PAGE_8M 9 +#define MMU_PAGE_16M 10 +#define MMU_PAGE_64M 11 +#define MMU_PAGE_256M 12 +#define MMU_PAGE_1G 13 +#define MMU_PAGE_16G 14 +#define MMU_PAGE_64G 15 /* N.B. 
we need to change the type of hpte_page_sizes if this gets to be > 16 */ -#define MMU_PAGE_COUNT 15 +#define MMU_PAGE_COUNT 16 #ifdef CONFIG_PPC_BOOK3S_64 #include diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 3742b1919661..b4df2734c078 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -49,6 +49,7 @@ #define _PMD_BAD 0x0ff0 #define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_8M 0x000c +#define _PMD_PAGE_512K 0x0004 /* Until my rework is finished, 8xx still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 1263c22d60d8..172849727054 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #ifdef CONFIG_HUGETLB_PAGE static inline int hugepd_ok(hugepd_t hpd) { +#ifdef CONFIG_PPC_8xx + return ((hpd.pd & 0x4) != 0); +#else return (hpd.pd > 0); +#endif } static inline int pmd_huge(pmd_t pmd) diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h index 0197e12f7d48..1f1636124a04 100644 --- a/arch/powerpc/include/asm/reg_8xx.h +++ b/arch/powerpc/include/asm/reg_8xx.h @@ -4,7 +4,7 @@ #ifndef _ASM_POWERPC_REG_8xx_H #define _ASM_POWERPC_REG_8xx_H -#include +#include /* Cache control on the MPC8xx is provided through some additional * special purpose registers. diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index fb133a163263..1a9c99d3e5d8 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -73,6 +73,9 @@ #define RPN_PATTERN 0x00f0 #endif +#define PAGE_SHIFT_512K 19 +#define PAGE_SHIFT_8M 23 + __HEAD _ENTRY(_stext); _ENTRY(_start); @@ -322,7 +325,7 @@ SystemCall: #endif InstructionTLBMiss: -#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) +#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) mtspr SPRN_SPRG_SCRATCH2, r3 #endif EXCEPTION_PROLOG_0 @@ -332,10 +335,12 @@ InstructionTLBMiss: */ mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10) -#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) /* Only modules will cause ITLB Misses as we always * pin the first 8MB of kernel memory */ +#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) mfcr r3 +#endif +#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) IS_KERNEL(r11, r10) #endif mfspr r11, SPRN_M_TW /* Get level 1 table */ @@ -343,7 +348,6 @@ InstructionTLBMiss: BRANCH_UNLESS_KERNEL(3f) lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 3: - mtcr r3 #endif /* Insert level 1 index */ rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 @@ -351,14 +355,25 @@ InstructionTLBMiss: /* Extract level 2 index */ rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 +#ifdef CONFIG_HUGETLB_PAGE + mtcr r11 + bt- 28, 10f /* bit 28 = Large page (8M) */ + bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ +#endif rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ lwz r10, 0(r10) /* Get the pte */ - +4: +#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) + mtcr r3 +#endif /* Insert the APG into the TWC 
from the Linux PTE. */ rlwimi r11, r10, 0, 25, 26 /* Load the MI_TWC with the attributes for this "segment." */ MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ +#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) + rlwimi r10, r11, 1, MI_SPS16K +#endif #ifdef CONFIG_SWAP rlwinm r11, r10, 32-5, _PAGE_PRESENT and r11, r11, r10 @@ -371,16 +386,45 @@ InstructionTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. */ +#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) + rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */ +#else rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */ +#endif MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ /* Restore registers */ -#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) +#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) mfspr r3, SPRN_SPRG_SCRATCH2 #endif EXCEPTION_EPILOG_0 rfi +#ifdef CONFIG_HUGETLB_PAGE +10: /* 8M pages */ +#ifdef CONFIG_PPC_16K_PAGES + /* Extract level 2 index */ + rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 + /* Add level 2 base */ + rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 +#else + /* Level 2 base */ + rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK +#endif + lwz r10, 0(r10) /* Get the pte */ + rlwinm r11, r11, 0, 0xf + b 4b + +20: /* 512k pages */ + /* Extract level 2 index */ + rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 + /* Add level 2 base */ + rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 + lwz r10, 0(r10) /* Get the pte */ + rlwinm r11, r11, 0, 0xf + b 4b +#endif + . = 0x1200 DataStoreTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 @@ -407,7 +451,6 @@ _ENTRY(DTLBMiss_jmp) #endif blt cr7, DTLBMissLinear 3: - mtcr r3 mfspr r10, SPRN_MD_EPN /* Insert level 1 index */ @@ -418,8 +461,15 @@ _ENTRY(DTLBMiss_jmp) */ /* Extract level 2 index */ rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 +#ifdef CONFIG_HUGETLB_PAGE + mtcr r11 + bt- 28, 10f /* bit 28 = Large page (8M) */ + bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ +#endif rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ lwz r10, 0(r10) /* Get the pte */ +4: + mtcr r3 /* Insert the Guarded flag and APG into the TWC from the Linux PTE. * It is bit 26-27 of both the Linux PTE and the TWC (at least @@ -434,6 +484,11 @@ _ENTRY(DTLBMiss_jmp) rlwimi r11, r10, 32-5, 30, 30 MTSPR_CPU6(SPRN_MD_TWC, r11, r3) + /* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29) + * In 16k pages mode, SPS is always 1 */ +#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) + rlwimi r10, r11, 1, MD_SPS16K +#endif /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. * We also need to know if the insn is a load/store, so: * Clear _PAGE_PRESENT and load that which will @@ -455,7 +510,11 @@ _ENTRY(DTLBMiss_jmp) * of the MMU. 
*/ li r11, RPN_PATTERN +#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) + rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */ +#else rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ +#endif rlwimi r10, r11, 0, 20, 20 /* clear 20 */ MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ @@ -465,6 +524,30 @@ _ENTRY(DTLBMiss_jmp) EXCEPTION_EPILOG_0 rfi +#ifdef CONFIG_HUGETLB_PAGE +10: /* 8M pages */ + /* Extract level 2 index */ +#ifdef CONFIG_PPC_16K_PAGES + rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 + /* Add level 2 base */ + rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 +#else + /* Level 2 base */ + rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK +#endif + lwz r10, 0(r10) /* Get the pte */ + rlwinm r11, r11, 0, 0xf + b 4b + +20: /* 512k pages */ + /* Extract level 2 index */ + rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 + /* Add level 2 base */ + rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 + lwz r10, 0(r10) /* Get the pte */ + rlwinm r11, r11, 0, 0xf + b 4b +#endif /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction @@ -586,6 +669,9 @@ _ENTRY(FixupDAR_cmp) /* Insert level 1 index */ 3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + mtcr r11 + bt 28,200f /* bit 28 = Large page (8M) */ + bt 29,202f /* bit 29 = Large page (8M or 512K) */ rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */ /* Insert level 2 index */ rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 @@ -611,6 +697,27 @@ _ENTRY(FixupDAR_cmp) 141: mfspr r10,SPRN_SPRG_SCRATCH2 b DARFixed /* Nope, go back to normal TLB processing */ + /* concat physical page address(r11) and page offset(r10) */ +200: +#ifdef CONFIG_PPC_16K_PAGES + rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 + rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 +#else + rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK +#endif + lwz r11, 0(r11) /* Get the pte */ + /* concat physical page address(r11) and page offset(r10) */ + rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 + b 201b + +202: + rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 + rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 + lwz r11, 0(r11) /* Get the pte */ + /* concat physical page address(r11) and page offset(r10) */ + rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31 + b 201b + 144: mfspr r10, SPRN_DSISR rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ mtspr SPRN_DSISR, r10 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 53245aa00e22..289df38fb7e0 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -26,6 +26,8 @@ #ifdef CONFIG_HUGETLB_PAGE #define PAGE_SHIFT_64K 16 +#define PAGE_SHIFT_512K 19 +#define PAGE_SHIFT_8M 23 #define PAGE_SHIFT_16M 24 #define PAGE_SHIFT_16G 34 @@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT; * implementations may have more than one gpage size, so we need multiple * arrays */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #define MAX_NUMBER_GPAGES 128 struct psize_gpages { u64 gpage_list[MAX_NUMBER_GPAGES]; @@ -105,6 +107,11 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, 
#ifdef CONFIG_PPC_BOOK3S_64 hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2); +#elif defined(CONFIG_PPC_8xx) + hpdp->pd = __pa(new) | + (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : + _PMD_PAGE_512K) | + _PMD_PRESENT; #else /* We use the old format for PPC_FSL_BOOK3E */ hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; @@ -124,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * These macros define how to determine which level of the page table holds * the hpdp. */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #define HUGEPD_PGD_SHIFT PGDIR_SHIFT #define HUGEPD_PUD_SHIFT PUD_SHIFT #else @@ -200,7 +207,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz return hugepte_offset(*hpdp, addr, pdshift); } -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy allocator is setup. */ @@ -366,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate) } #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) @@ -735,10 +742,10 @@ static int __init add_huge_page_size(unsigned long long size) * that it fits within pagetable and slice limits. */ if (size <= PAGE_SIZE) return -EINVAL; -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) if (!is_power_of_4(size)) return -EINVAL; -#else +#elif !defined(CONFIG_PPC_8xx) if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT)) return -EINVAL; #endif @@ -777,7 +784,7 @@ static int __init hugetlbpage_init(void) { int psize; -#if !defined(CONFIG_PPC_FSL_BOOK3E) +#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx) if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; #endif @@ -809,7 +816,7 @@ static int __init hugetlbpage_init(void) panic("hugetlbpage_init(): could not create " "pgtable cache for %d bit pagesize\n", shift); } -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) else if (!hugepte_cache) { /* * Create a kmem cache for hugeptes. The bottom bits in @@ -828,10 +835,12 @@ static int __init hugetlbpage_init(void) #endif } -#ifdef CONFIG_PPC_FSL_BOOK3E - /* Default hpage size = 4M */ +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) + /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */ if (mmu_psize_defs[MMU_PAGE_4M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; + else if (mmu_psize_defs[MMU_PAGE_512K].shift) + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift; #else /* Set default large page size. Currently, we pick 16M or 1M * depending on what is available diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 050badc0ebd3..ba28fcb98597 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -53,7 +53,7 @@ * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. 
*/ -#ifdef CONFIG_PPC_BOOK3E_MMU +#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) #ifdef CONFIG_PPC_FSL_BOOK3E struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { @@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { .enc = BOOK3E_PAGESZ_1GB, }, }; +#elif defined(CONFIG_PPC_8xx) +struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { + /* we only manage 4k and 16k pages as normal pages */ +#ifdef CONFIG_PPC_4K_PAGES + [MMU_PAGE_4K] = { + .shift = 12, + }, +#else + [MMU_PAGE_16K] = { + .shift = 14, + }, +#endif + [MMU_PAGE_512K] = { + .shift = 19, + }, + [MMU_PAGE_8M] = { + .shift = 23, + }, +}; #else struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 564d99bb2a26..80cbcb0ad9b1 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -130,6 +130,7 @@ config 8xx_CPU6 config 8xx_CPU15 bool "CPU15 Silicon Errata" + depends on !HUGETLB_PAGE default y help This enables a workaround for erratum CPU15 on MPC8xx chips. diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index ca2da30ad2ab..6e89e5a8d4fb 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -34,6 +34,7 @@ config PPC_8xx select FSL_SOC select 8xx select PPC_LIB_RHEAP + select SYS_SUPPORTS_HUGETLBFS config 40x bool "AMCC 40x" -- cgit From ec5a0171c78db604d8744095554d930efff67b69 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Sun, 4 Dec 2016 13:47:28 +0100 Subject: powerpc/fsl-lbc: use DEFINE_SPINLOCK() Signed-off-by: Fabian Frederick Signed-off-by: Scott Wood --- arch/powerpc/sysdev/fsl_lbc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 424b67fdb57f..5340a483cf55 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -31,7 +31,7 @@ #include #include -static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); +static DEFINE_SPINLOCK(fsl_lbc_lock); struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; EXPORT_SYMBOL(fsl_lbc_ctrl_dev); -- cgit From 39e7ac1bcf0fd1e2a607a35c30ebba4012a12d90 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Sun, 4 Dec 2016 13:44:59 +0100 Subject: soc/fsl/qman: test: use DEFINE_SPINLOCK() Signed-off-by: Fabian Frederick Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/qman_test_stash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c index b9795f39bcc8..e87b65403b67 100644 --- a/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); /* links together the hp_cpu structs, in first-come first-serve order. 
*/ static LIST_HEAD(hp_cpu_list); -static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); +static DEFINE_SPINLOCK(hp_lock); static unsigned int hp_cpu_list_length; -- cgit From 056f96577c83c38bc22a6b384a388112f130244d Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 7 Dec 2016 17:14:53 +0200 Subject: powerpc/fsl/dts: add QMan and BMan nodes on t1023 Signed-off-by: Madalin Bucur Signed-off-by: Scott Wood --- arch/powerpc/boot/dts/fsl/t1023rdb.dts | 29 ++++++++ arch/powerpc/boot/dts/fsl/t1023si-post.dtsi | 103 ++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts index 29757623e5ba..5ba6fbfca274 100644 --- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts @@ -41,6 +41,27 @@ #size-cells = <2>; interrupt-parent = <&mpic>; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + bman_fbpr: bman-fbpr { + size = <0 0x1000000>; + alignment = <0 0x1000000>; + }; + + qman_fqd: qman-fqd { + size = <0 0x400000>; + alignment = <0 0x400000>; + }; + + qman_pfdr: qman-pfdr { + size = <0 0x2000000>; + alignment = <0 0x2000000>; + }; + }; + ifc: localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x2000>; ranges = <0 0 0xf 0xe8000000 0x08000000 @@ -72,6 +93,14 @@ ranges = <0x00000000 0xf 0x00000000 0x01072000>; }; + bportals: bman-portals@ff4000000 { + ranges = <0x0 0xf 0xf4000000 0x2000000>; + }; + + qportals: qman-portals@ff6000000 { + ranges = <0x0 0xf 0xf6000000 0x2000000>; + }; + soc: soc@ffe000000 { ranges = <0x00000000 0xf 0xfe000000 0x1000000>; reg = <0xf 0xfe000000 0 0x00001000>; diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi index 6e0b4892a740..da2894c59479 100644 --- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi @@ -34,6 +34,21 @@ #include +&bman_fbpr { + compatible = "fsl,bman-fbpr"; + alloc-ranges = <0 0 0x10000 0>; +}; + +&qman_fqd { + compatible = "fsl,qman-fqd"; + alloc-ranges = <0 0 0x10000 0>; +}; + +&qman_pfdr { + compatible = "fsl,qman-pfdr"; + alloc-ranges = <0 0 0x10000 0>; +}; + &ifc { #address-cells = <2>; #size-cells = <1>; @@ -180,6 +195,92 @@ }; }; +&bportals { + #address-cells = <0x1>; + #size-cells = <0x1>; + compatible = "simple-bus"; + + bman-portal@0 { + cell-index = <0x0>; + compatible = "fsl,bman-portal"; + reg = <0x0 0x4000>, <0x1000000 0x1000>; + interrupts = <105 2 0 0>; + }; + bman-portal@4000 { + cell-index = <0x1>; + compatible = "fsl,bman-portal"; + reg = <0x4000 0x4000>, <0x1001000 0x1000>; + interrupts = <107 2 0 0>; + }; + bman-portal@8000 { + cell-index = <2>; + compatible = "fsl,bman-portal"; + reg = <0x8000 0x4000>, <0x1002000 0x1000>; + interrupts = <109 2 0 0>; + }; + bman-portal@c000 { + cell-index = <0x3>; + compatible = "fsl,bman-portal"; + reg = <0xc000 0x4000>, <0x1003000 0x1000>; + interrupts = <111 2 0 0>; + }; + bman-portal@10000 { + cell-index = <0x4>; + compatible = "fsl,bman-portal"; + reg = <0x10000 0x4000>, <0x1004000 0x1000>; + interrupts = <113 2 0 0>; + }; + bman-portal@14000 { + cell-index = <0x5>; + compatible = "fsl,bman-portal"; + reg = <0x14000 0x4000>, <0x1005000 0x1000>; + interrupts = <115 2 0 0>; + }; +}; + +&qportals { + #address-cells = <0x1>; + #size-cells = <0x1>; + compatible = "simple-bus"; + + qportal0: qman-portal@0 { + compatible = "fsl,qman-portal"; + reg = <0x0 0x4000>, <0x1000000 0x1000>; + interrupts = <104 0x2 0 0>; + cell-index = <0x0>; + }; + qportal1: 
qman-portal@4000 { + compatible = "fsl,qman-portal"; + reg = <0x4000 0x4000>, <0x1001000 0x1000>; + interrupts = <106 0x2 0 0>; + cell-index = <0x1>; + }; + qportal2: qman-portal@8000 { + compatible = "fsl,qman-portal"; + reg = <0x8000 0x4000>, <0x1002000 0x1000>; + interrupts = <108 0x2 0 0>; + cell-index = <0x2>; + }; + qportal3: qman-portal@c000 { + compatible = "fsl,qman-portal"; + reg = <0xc000 0x4000>, <0x1003000 0x1000>; + interrupts = <110 0x2 0 0>; + cell-index = <0x3>; + }; + qportal4: qman-portal@10000 { + compatible = "fsl,qman-portal"; + reg = <0x10000 0x4000>, <0x1004000 0x1000>; + interrupts = <112 0x2 0 0>; + cell-index = <0x4>; + }; + qportal5: qman-portal@14000 { + compatible = "fsl,qman-portal"; + reg = <0x14000 0x4000>, <0x1005000 0x1000>; + interrupts = <114 0x2 0 0>; + cell-index = <0x5>; + }; +}; + &soc { #address-cells = <1>; #size-cells = <1>; @@ -413,6 +514,8 @@ }; /include/ "qoriq-sec5.0-0.dtsi" +/include/ "qoriq-qman3.dtsi" +/include/ "qoriq-bman1.dtsi" /include/ "qoriq-fman3l-0.dtsi" /include/ "qoriq-fman3-0-10g-0-best-effort.dtsi" -- cgit From 138bde87876c10fedf8756f55a775246fe0d3f7e Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 7 Dec 2016 17:14:54 +0200 Subject: powerpc/fsl/dts: add QMan and BMan nodes on t1024 Signed-off-by: Madalin Bucur Signed-off-by: Scott Wood --- arch/powerpc/boot/dts/fsl/t1024qds.dts | 29 +++++++++++++++++++++++++++++ arch/powerpc/boot/dts/fsl/t1024rdb.dts | 29 +++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts index 772143da367f..d6858b7cd93f 100644 --- a/arch/powerpc/boot/dts/fsl/t1024qds.dts +++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts @@ -41,6 +41,27 @@ #size-cells = <2>; interrupt-parent = <&mpic>; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + bman_fbpr: bman-fbpr { + size = <0 0x1000000>; + alignment = <0 0x1000000>; + }; + + qman_fqd: qman-fqd { + size = <0 0x400000>; + alignment = <0 0x400000>; + }; + + qman_pfdr: qman-pfdr { + size = <0 0x2000000>; + alignment = <0 0x2000000>; + }; + }; + ifc: localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x2000>; ranges = <0 0 0xf 0xe8000000 0x08000000 @@ -80,6 +101,14 @@ ranges = <0x00000000 0xf 0x00000000 0x01072000>; }; + bportals: bman-portals@ff4000000 { + ranges = <0x0 0xf 0xf4000000 0x2000000>; + }; + + qportals: qman-portals@ff6000000 { + ranges = <0x0 0xf 0xf6000000 0x2000000>; + }; + soc: soc@ffe000000 { ranges = <0x00000000 0xf 0xfe000000 0x1000000>; reg = <0xf 0xfe000000 0 0x00001000>; diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts index 302cdd22b4bb..c7f4f62afaf4 100644 --- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts @@ -41,6 +41,27 @@ #size-cells = <2>; interrupt-parent = <&mpic>; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + bman_fbpr: bman-fbpr { + size = <0 0x1000000>; + alignment = <0 0x1000000>; + }; + + qman_fqd: qman-fqd { + size = <0 0x400000>; + alignment = <0 0x400000>; + }; + + qman_pfdr: qman-pfdr { + size = <0 0x2000000>; + alignment = <0 0x2000000>; + }; + }; + ifc: localbus@ffe124000 { reg = <0xf 0xfe124000 0 0x2000>; ranges = <0 0 0xf 0xe8000000 0x08000000 @@ -82,6 +103,14 @@ ranges = <0x00000000 0xf 0x00000000 0x01072000>; }; + bportals: bman-portals@ff4000000 { + ranges = <0x0 0xf 0xf4000000 0x2000000>; + }; + + qportals: qman-portals@ff6000000 { + ranges = <0x0 0xf 0xf6000000 0x2000000>; + }; + 
soc: soc@ffe000000 { ranges = <0x00000000 0xf 0xfe000000 0x1000000>; reg = <0xf 0xfe000000 0 0x00001000>; -- cgit From f93ace2b22e993bafca117a295a42a7774bff4a4 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 7 Dec 2016 17:14:55 +0200 Subject: powerpc/fsl/dts: add sg_2500_aqr105_phy4 alias on t1024rdb The alias is used by the boot loader to perform a device tree fixup. Signed-off-by: Madalin Bucur Signed-off-by: Scott Wood --- arch/powerpc/boot/dts/fsl/t1024rdb.dts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts index c7f4f62afaf4..73a645324bc1 100644 --- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts @@ -41,6 +41,10 @@ #size-cells = <2>; interrupt-parent = <&mpic>; + aliases { + sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; -- cgit From baae856ebdeeaefbadd4a02cdb54b7c2277ff4dd Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Wed, 7 Dec 2016 17:14:56 +0200 Subject: powerpc/fsl/dts: add FMan node for t1042d4rdb Signed-off-by: Madalin Bucur Signed-off-by: Scott Wood --- arch/powerpc/boot/dts/fsl/t1042d4rdb.dts | 52 ++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts index 2a5a90dd272e..fcd2aeb5b8ac 100644 --- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts @@ -48,6 +48,58 @@ "fsl,deepsleep-cpld"; }; }; + + soc: soc@ffe000000 { + fman0: fman@400000 { + ethernet@e0000 { + phy-handle = <&phy_sgmii_0>; + phy-connection-type = "sgmii"; + }; + + ethernet@e2000 { + phy-handle = <&phy_sgmii_1>; + phy-connection-type = "sgmii"; + }; + + ethernet@e4000 { + phy-handle = <&phy_sgmii_2>; + phy-connection-type = "sgmii"; + }; + + ethernet@e6000 { + phy-handle = <&phy_rgmii_0>; + phy-connection-type = "rgmii"; + }; + + ethernet@e8000 { + phy-handle = <&phy_rgmii_1>; + phy-connection-type = "rgmii"; + }; + + mdio0: mdio@fc000 { + phy_sgmii_0: ethernet-phy@02 { + reg = <0x02>; + }; + + phy_sgmii_1: ethernet-phy@03 { + reg = <0x03>; + }; + + phy_sgmii_2: ethernet-phy@01 { + reg = <0x01>; + }; + + phy_rgmii_0: ethernet-phy@04 { + reg = <0x04>; + }; + + phy_rgmii_1: ethernet-phy@05 { + reg = <0x05>; + }; + }; + }; + }; + }; #include "t1042si-post.dtsi" -- cgit
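
As a quick illustration of the 8xx hugepage encoding discussed in the "powerpc/8xx: Implement support of hugepages" patch earlier in this series: the page size is kept in the PS field (bits 28-29) of the PGD entry, 01 meaning 512k pages and 11 meaning 8M pages, and the 8xx variant of hugepd_shift() recovers the page shift from that field. The stand-alone sketch below only restates that arithmetic for readers following along; the constant names, the helper name hugepd_shift_8xx() and the printf harness are illustrative inventions, not part of the kernel patches (the values mirror _PMD_PAGE_512K/_PMD_PAGE_8M from pte-8xx.h and the shift computation mirrors hugepd_shift() as added by the patch).

/*
 * Stand-alone sketch (not part of the series): decode the 8xx huge page
 * shift from the PS field (bits 28-29) of a PGD entry.  Constant values
 * mirror _PMD_PAGE_512K/_PMD_PAGE_8M; names here are illustrative only.
 */
#include <stdio.h>

#define PS_FIELD_MASK	0x000c	/* PS field, bits 28-29 of the PGD entry */
#define PS_512K		0x0004	/* PS = 01 : 512k huge page */
#define PS_8M		0x000c	/* PS = 11 : 8M huge page */

static unsigned int hugepd_shift_8xx(unsigned long pd)
{
	/* PS = 01 -> shift 19 (512k), PS = 11 -> shift 23 (8M) */
	return ((pd & PS_FIELD_MASK) >> 1) + 17;
}

int main(void)
{
	printf("512k entry: page shift %u\n", hugepd_shift_8xx(PS_512K));
	printf("8M entry:   page shift %u\n", hugepd_shift_8xx(PS_8M));
	return 0;
}
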