Diffstat (limited to 'arch/mips')
-rw-r--r--   arch/mips/Kconfig                           |   1
-rw-r--r--   arch/mips/include/asm/checksum.h            |  68
-rw-r--r--   arch/mips/include/asm/compat.h              |   2
-rw-r--r--   arch/mips/kernel/kprobes.c                  |  54
-rw-r--r--   arch/mips/kernel/syscalls/syscall_n32.tbl   |  12
-rw-r--r--   arch/mips/kernel/syscalls/syscall_o32.tbl   |  12
-rw-r--r--   arch/mips/kernel/vmlinux.lds.S              |   1
-rw-r--r--   arch/mips/lib/csum_partial.S                | 261
8 files changed, 117 insertions(+), 294 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8f328298f8cc..cff19225da3d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -86,6 +86,7 @@ config MIPS
 	select MODULES_USE_ELF_REL if MODULES
 	select MODULES_USE_ELF_RELA if MODULES && 64BIT
 	select PERF_USE_VMALLOC
+	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
 	select RTC_LIB
 	select SYSCTL_EXCEPTION_TRACE
 	select VIRT_TO_BUS
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 181f7d14efb9..5f80c28f5253 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -34,42 +34,17 @@
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-__wsum __csum_partial_copy_kernel(const void *src, void *dst,
-				  int len, __wsum sum, int *err_ptr);
-
-__wsum __csum_partial_copy_from_user(const void *src, void *dst,
-				     int len, __wsum sum, int *err_ptr);
-__wsum __csum_partial_copy_to_user(const void *src, void *dst,
-				   int len, __wsum sum, int *err_ptr);
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
-				   __wsum sum, int *err_ptr)
-{
-	might_fault();
-	if (uaccess_kernel())
-		return __csum_partial_copy_kernel((__force void *)src, dst,
-						  len, sum, err_ptr);
-	else
-		return __csum_partial_copy_from_user((__force void *)src, dst,
-						     len, sum, err_ptr);
-}
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
-			       int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
 {
-	if (access_ok(src, len))
-		return csum_partial_copy_from_user(src, dst, len, sum,
-						   err_ptr);
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return sum;
+	might_fault();
+	if (!access_ok(src, len))
+		return 0;
+	return __csum_partial_copy_from_user(src, dst, len);
 }
 
 /*
@@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
  */
 #define HAVE_CSUM_COPY_USER
 static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
-			     __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
 {
 	might_fault();
-	if (access_ok(dst, len)) {
-		if (uaccess_kernel())
-			return __csum_partial_copy_kernel(src,
-							  (__force void *)dst,
-							  len, sum, err_ptr);
-		else
-			return __csum_partial_copy_to_user(src,
-							   (__force void *)dst,
-							   len, sum, err_ptr);
-	}
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return (__force __wsum)-1; /* invalid checksum */
+	if (!access_ok(dst, len))
+		return 0;
+	return __csum_partial_copy_to_user(src, dst, len);
 }
 
 /*
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
  */
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-				 int len, __wsum sum);
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+	return __csum_partial_copy_nocheck(src, dst, len);
+}
 
 /*
  * Fold a partial checksum without adding pseudo headers
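For context, a minimal sketch of how a caller would use the three-argument csum_and_copy_from_user() defined above: the sum seed and err_ptr parameters are gone, and a return value of 0 now signals a fault (or a failed access_ok() check). The helper name and error handling here are illustrative only, not part of this patch.

	/* Illustrative sketch only -- not from this commit. */
	static int example_copy_and_csum_from_user(const void __user *usrc,
						   void *kbuf, int len,
						   __wsum *csump)
	{
		__wsum csum = csum_and_copy_from_user(usrc, kbuf, len);

		if (!csum)		/* 0 now means the user copy faulted */
			return -EFAULT;

		*csump = csum;		/* partial sum; fold later with csum_fold() */
		return 0;
	}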
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 255afcdd79c9..65975712a22d 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -26,8 +26,6 @@ typedef s32		compat_caddr_t;
 typedef struct {
 	s32	val[2];
 } compat_fsid_t;
-typedef s64		compat_s64;
-typedef u64		compat_u64;
 
 struct compat_stat {
 	compat_dev_t	st_dev;
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index d043c2f897fc..54dfba8fa77c 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -477,6 +477,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
 	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+	ri->fp = NULL;
 
 	/* Replace the return addr with trampoline addr */
 	regs->regs[31] = (unsigned long)kretprobe_trampoline;
@@ -488,57 +489,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 static int __kprobes trampoline_probe_handler(struct kprobe *p,
 						struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
-	struct hlist_head *head, empty_rp;
-	struct hlist_node *tmp;
-	unsigned long flags, orig_ret_address = 0;
-	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
-
-	INIT_HLIST_HEAD(&empty_rp);
-	kretprobe_hash_lock(current, &head, &flags);
-
-	/*
-	 * It is possible to have multiple instances associated with a given
-	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more than one return
-	 * return probe was registered for a target function.
-	 *
-	 * We can handle this because:
-	 *     - instances are always inserted at the head of the list
-	 *     - when multiple return probes are registered for the same
-	 *       function, the first instance's ret_addr will point to the
-	 *       real return address, and all the rest will point to
-	 *       kretprobe_trampoline
-	 */
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-
-		if (ri->rp && ri->rp->handler)
-			ri->rp->handler(ri, regs);
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri, &empty_rp);
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-	instruction_pointer(regs) = orig_ret_address;
-
-	kretprobe_hash_unlock(current, &flags);
-
-	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-		hlist_del(&ri->hlist);
-		kfree(ri);
-	}
+	instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
+						kretprobe_trampoline, NULL);
 	/*
 	 * By returning a non-zero value, we are telling
 	 * kprobe_handler() that we don't want the post_handler
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index f9df9edb67a4..cf72a0206a87 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -25,8 +25,8 @@
 15	n32	ioctl				compat_sys_ioctl
 16	n32	pread64				sys_pread64
 17	n32	pwrite64			sys_pwrite64
-18	n32	readv				compat_sys_readv
-19	n32	writev				compat_sys_writev
+18	n32	readv				sys_readv
+19	n32	writev				sys_writev
 20	n32	access				sys_access
 21	n32	pipe				sysm_pipe
 22	n32	_newselect			compat_sys_select
@@ -167,7 +167,7 @@
 157	n32	sync				sys_sync
 158	n32	acct				sys_acct
 159	n32	settimeofday			compat_sys_settimeofday
-160	n32	mount				compat_sys_mount
+160	n32	mount				sys_mount
 161	n32	umount2				sys_umount
 162	n32	swapon				sys_swapon
 163	n32	swapoff				sys_swapoff
@@ -278,7 +278,7 @@
 267	n32	splice				sys_splice
 268	n32	sync_file_range			sys_sync_file_range
 269	n32	tee				sys_tee
-270	n32	vmsplice			compat_sys_vmsplice
+270	n32	vmsplice			sys_vmsplice
 271	n32	move_pages			compat_sys_move_pages
 272	n32	set_robust_list			compat_sys_set_robust_list
 273	n32	get_robust_list			compat_sys_get_robust_list
@@ -317,8 +317,8 @@
 306	n32	syncfs				sys_syncfs
 307	n32	sendmmsg			compat_sys_sendmmsg
 308	n32	setns				sys_setns
-309	n32	process_vm_readv		compat_sys_process_vm_readv
-310	n32	process_vm_writev		compat_sys_process_vm_writev
+309	n32	process_vm_readv		sys_process_vm_readv
+310	n32	process_vm_writev		sys_process_vm_writev
 311	n32	kcmp				sys_kcmp
 312	n32	finit_module			sys_finit_module
 313	n32	sched_setattr			sys_sched_setattr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 195b43cf27c8..a17aab5abeb2 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -29,7 +29,7 @@
 18	o32	unused18			sys_ni_syscall
 19	o32	lseek				sys_lseek
 20	o32	getpid				sys_getpid
-21	o32	mount				sys_mount			compat_sys_mount
+21	o32	mount				sys_mount
 22	o32	umount				sys_oldumount
 23	o32	setuid				sys_setuid
 24	o32	getuid				sys_getuid
@@ -156,8 +156,8 @@
 142	o32	_newselect			sys_select			compat_sys_select
 143	o32	flock				sys_flock
 144	o32	msync				sys_msync
-145	o32	readv				sys_readv			compat_sys_readv
-146	o32	writev				sys_writev			compat_sys_writev
+145	o32	readv				sys_readv
+146	o32	writev				sys_writev
 147	o32	cacheflush			sys_cacheflush
 148	o32	cachectl			sys_cachectl
 149	o32	sysmips				__sys_sysmips
@@ -318,7 +318,7 @@
 304	o32	splice				sys_splice
 305	o32	sync_file_range			sys_sync_file_range		sys32_sync_file_range
 306	o32	tee				sys_tee
-307	o32	vmsplice			sys_vmsplice			compat_sys_vmsplice
+307	o32	vmsplice			sys_vmsplice
 308	o32	move_pages			sys_move_pages			compat_sys_move_pages
 309	o32	set_robust_list			sys_set_robust_list		compat_sys_set_robust_list
 310	o32	get_robust_list			sys_get_robust_list		compat_sys_get_robust_list
@@ -356,8 +356,8 @@
 342	o32	syncfs				sys_syncfs
 343	o32	sendmmsg			sys_sendmmsg			compat_sys_sendmmsg
 344	o32	setns				sys_setns
-345	o32	process_vm_readv		sys_process_vm_readv		compat_sys_process_vm_readv
-346	o32	process_vm_writev		sys_process_vm_writev		compat_sys_process_vm_writev
+345	o32	process_vm_readv		sys_process_vm_readv
+346	o32	process_vm_writev		sys_process_vm_writev
 347	o32	kcmp				sys_kcmp
 348	o32	finit_module			sys_finit_module
 349	o32	sched_setattr			sys_sched_setattr
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f185a85a27c1..5e97e9d02f98 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -202,6 +202,7 @@ SECTIONS
 
 	STABS_DEBUG
 	DWARF_DEBUG
+	ELF_DETAILS
 
 	/* These must appear regardless of  .  */
 	.gptab.sdata : {
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 87fda0713b84..a46db0807195 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
 /*
  * checksum and copy routines based on memcpy.S
  *
- *	csum_partial_copy_nocheck(src, dst, len, sum)
- *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
+ *	csum_partial_copy_nocheck(src, dst, len)
+ *	__csum_partial_copy_kernel(src, dst, len)
  *
  * See "Spec" in memcpy.S for details.	Unlike __copy_user, all
  * function in this file use the standard calling convention.
@@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
 #define src a0
 #define dst a1
 #define len a2
-#define psum a3
 #define sum v0
 #define odd t8
-#define errptr t9
 
 /*
- * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
- *     of the copy,
- *  2- src_entry <= src < AT, and
- *  3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- *	not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
  */
 
 /* Instruction type */
@@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
  * addr    : Address
  * handler : Exception handler
  */
-#define EXC(insn, type, reg, addr, handler)	\
+#define EXC(insn, type, reg, addr)		\
 	.if \mode == LEGACY_MODE;		\
 9:		insn reg, addr;			\
 		.section __ex_table,"a";	\
-		PTR	9b, handler;		\
+		PTR	9b, .L_exc;		\
 		.previous;			\
 	/* This is enabled in EVA mode */	\
 	.else;					\
@@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
 		    ((\to == USEROP) && (type == ST_INSN));	\
 9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
 			.section __ex_table,"a";		\
-			PTR	9b, handler;			\
+			PTR	9b, .L_exc;			\
 			.previous;				\
 		.else;						\
 			/* EVA without exception */		\
@@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
 #ifdef USE_DOUBLE
 
 #define LOADK	ld /* No exception */
-#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr)		EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sd, ST_INSN, reg, addr)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
 #else
 
 #define LOADK	lw /* No exception */
-#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr)		EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sw, ST_INSN, reg, addr)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
 	.set	at=v1
 #endif
 
-	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
 
-	PTR_ADDU	AT, src, len	/* See (1) above. */
-	/* initialize __nocheck if this the first time we execute this
-	 * macro
-	 */
-#ifdef CONFIG_64BIT
-	move	errptr, a4
-#else
-	lw	errptr, 16(sp)
-#endif
-	.if \__nocheck == 1
-	FEXPORT(csum_partial_copy_nocheck)
-	EXPORT_SYMBOL(csum_partial_copy_nocheck)
-	.endif
-	move	sum, zero
+	li	sum, -1
 	move	odd, zero
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
@@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
 	SUB	len, 8*NBYTES		# subtract here for bgez loop
 	.align	4
 1:
-	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
-	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
-	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
-	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
-	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
-	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
-	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
-	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
+	LOAD(t4, UNIT(4)(src))
+	LOAD(t5, UNIT(5)(src))
+	LOAD(t6, UNIT(6)(src))
+	LOAD(t7, UNIT(7)(src))
 	SUB	len, len, 8*NBYTES
 	ADD	src, src, 8*NBYTES
-	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
-	STORE(t4, UNIT(4)(dst), .Ls_exc\@)
+	STORE(t4, UNIT(4)(dst))
 	ADDC(t4, t5)
-	STORE(t5, UNIT(5)(dst), .Ls_exc\@)
+	STORE(t5, UNIT(5)(dst))
 	ADDC(sum, t4)
-	STORE(t6, UNIT(6)(dst), .Ls_exc\@)
+	STORE(t6, UNIT(6)(dst))
 	ADDC(t6, t7)
-	STORE(t7, UNIT(7)(dst), .Ls_exc\@)
+	STORE(t7, UNIT(7)(dst))
 	ADDC(sum, t6)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 8*NBYTES
@@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
 	/*
 	 * len >= 4*NBYTES
 	 */
-	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
-	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
-	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
-	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
-	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
@@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
 	beq	rem, len, .Lcopy_bytes\@
 	 nop
 1:
-	LOAD(t0, 0(src), .Ll_exc\@)
+	LOAD(t0, 0(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-	STORE(t0, 0(dst), .Ls_exc\@)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
 	ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-	LOAD(t0, 0(src), .Ll_exc\@)
+	LOAD(t0, 0(src))
 	SUB	bits, bits, rem	# bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-	STREST(t0, -1(t1), .Ls_exc\@)
+	STREST(t0, -1(t1))
 	SHIFT_DISCARD_REVERT t0, t0, bits
 	.set	reorder
 	ADDC(sum, t0)
@@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t3, FIRST(0)(src))
 	ADD	t2, zero, NBYTES
-	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(0)(src))
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
-	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	STFIRST(t3, FIRST(0)(dst))
 	SLL	t4, t1, 3	# t4 = number of bits to discard
 	SHIFT_DISCARD t3, t3, t4
 	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
 	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 	 * are to the same unit (unless src is aligned, but it's not).
 	 */
-	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
-	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t0, FIRST(0)(src))
+	LDFIRST(t1, FIRST(1)(src))
 	SUB	len, len, 4*NBYTES
-	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
-	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
-	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
-	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
-	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
-	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	LDREST(t0, REST(0)(src))
+	LDREST(t1, REST(1)(src))
+	LDFIRST(t2, FIRST(2)(src))
+	LDFIRST(t3, FIRST(3)(src))
+	LDREST(t2, REST(2)(src))
+	LDREST(t3, REST(3)(src))
 	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
 	nop				# improves slotting
#endif
-	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
@@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
 	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
-	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
-	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDFIRST(t0, FIRST(0)(src))
+	LDREST(t0, REST(0)(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-	STORE(t0, 0(dst), .Ls_exc\@)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
 	move	t2, zero	# partial word
 	li	t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)			\
-	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
+	LOADBU(t0, N(src));		\
 	SUB	len, len, 1;		\
-	STOREB(t0, N(dst), .Ls_exc\@);	\
+	STOREB(t0, N(dst));		\
 	SLLV	t0, t0, t3;		\
 	addu	t3, SHIFT_INC;		\
 	beqz	len, .Lcopy_bytes_done\@;	\
@@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
 	COPY_BYTE(4)
 	COPY_BYTE(5)
#endif
-	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+	LOADBU(t0, NBYTES-2(src))
 	SUB	len, len, 1
-	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+	STOREB(t0, NBYTES-2(dst))
 	SLLV	t0, t0, t3
 	or	t2, t0
.Lcopy_bytes_done\@:
@@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
 	.set	pop
 	.set	reorder
-	ADDC32(sum, psum)
 	jr	ra
 	.set	noreorder
+	.endm
 
-.Ll_exc_copy\@:
-	/*
-	 * Copy bytes from src until faulting load address (or until a
-	 * lb faults)
-	 *
-	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
-	 * may be more than a byte beyond the last address.
-	 * Hence, the lb below may get an exception.
-	 *
-	 * Assumes src < THREAD_BUADDR($28)
-	 */
-	LOADK	t0, TI_TASK($28)
-	 li	t2, SHIFT_START
-	LOADK	t0, THREAD_BUADDR(t0)
-1:
-	LOADBU(t1, 0(src), .Ll_exc\@)
-	ADD	src, src, 1
-	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
-	SLLV	t1, t1, t2
-	addu	t2, SHIFT_INC
-	ADDC(sum, t1)
-	.set	reorder				/* DADDI_WAR */
-	ADD	dst, dst, 1
-	bne	src, t0, 1b
-	.set	noreorder
-.Ll_exc\@:
-	LOADK	t0, TI_TASK($28)
-	 nop
-	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
-	 nop
-	SUB	len, AT, t0		# len number of uncopied bytes
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone\@
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	.set	push
-	.set	noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-#endif
-	li	v1, -EFAULT
-	b	.Ldone\@
-	 sw	v1, (errptr)
-
-.Ls_exc\@:
-	li	v0, -1 /* invalid checksum */
-	li	v1, -EFAULT
+	.set	noreorder
+.L_exc:
 	jr	ra
-	 sw	v1, (errptr)
-	.set	pop
-	.endm
+	 li	v0, 0
 
-LEAF(__csum_partial_copy_kernel)
-EXPORT_SYMBOL(__csum_partial_copy_kernel)
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
 #ifndef CONFIG_EVA
 FEXPORT(__csum_partial_copy_to_user)
 EXPORT_SYMBOL(__csum_partial_copy_to_user)
 FEXPORT(__csum_partial_copy_from_user)
 EXPORT_SYMBOL(__csum_partial_copy_from_user)
 #endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
-END(__csum_partial_copy_kernel)
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
 
 #ifdef CONFIG_EVA
 LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
 END(__csum_partial_copy_to_user)
 
 LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
 END(__csum_partial_copy_from_user)
 #endif
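As a side note for readers following the assembly: the ADDC() steps above accumulate the checksum with end-around-carry (ones'-complement) addition. A rough, purely illustrative C model of one such step, assuming 32-bit words (the non-USE_DOUBLE build), not kernel code:

	#include <stdint.h>

	/* Rough model of ADDC(sum, word) in csum_partial.S: 32-bit add,
	 * then fold the carry back into the low bits. */
	static uint32_t addc32(uint32_t sum, uint32_t word)
	{
		uint64_t t = (uint64_t)sum + word;

		return (uint32_t)t + (uint32_t)(t >> 32);
	}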