From 93e2e85139509338c68279c7260ebb68177b23a9 Mon Sep 17 00:00:00 2001
From: Michal Simek
Date: Sat, 9 Oct 2010 13:58:24 +1000
Subject: microblaze: Separate library optimized functions memcpy/memmove/memset

Signed-off-by: Michal Simek
---
 arch/microblaze/lib/memcpy.c  | 13 ++++++++++---
 arch/microblaze/lib/memmove.c | 26 ++++++++++++++++++--------
 arch/microblaze/lib/memset.c  | 22 ++++++++++++++++++----
 3 files changed, 46 insertions(+), 15 deletions(-)

diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index 014bac92bdff..ab2d115f9ee5 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -33,17 +33,24 @@
 #include
 
 #ifdef __HAVE_ARCH_MEMCPY
+#ifndef CONFIG_OPT_LIB_FUNCTION
 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 {
 	const char *src = v_src;
 	char *dst = v_dst;
-#ifndef CONFIG_OPT_LIB_FUNCTION
+
 	/* Simple, byte oriented memcpy. */
 	while (c--)
 		*dst++ = *src++;
 
 	return v_dst;
-#else
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
+void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
+{
+	const char *src = v_src;
+	char *dst = v_dst;
+
 	/* The following code tries to optimize the copy by using unsigned
 	 * alignment. This will work fine if both source and destination are
 	 * aligned on the same boundary. However, if they are aligned on
@@ -150,7 +157,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 	}
 
 	return v_dst;
-#endif
 }
+#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memcpy);
 #endif /* __HAVE_ARCH_MEMCPY */
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 0929198c5e68..1d3c0e7990e5 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -31,16 +31,12 @@
 #include
 
 #ifdef __HAVE_ARCH_MEMMOVE
+#ifndef CONFIG_OPT_LIB_FUNCTION
 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 {
 	const char *src = v_src;
 	char *dst = v_dst;
 
-#ifdef CONFIG_OPT_LIB_FUNCTION
-	const uint32_t *i_src;
-	uint32_t *i_dst;
-#endif
-
 	if (!c)
 		return v_dst;
 
@@ -48,7 +44,6 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 	if (v_dst <= v_src)
 		return memcpy(v_dst, v_src, c);
 
-#ifndef CONFIG_OPT_LIB_FUNCTION
 	/* copy backwards, from end to beginning */
 	src += c;
 	dst += c;
@@ -58,7 +53,22 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 		*--dst = *--src;
 
 	return v_dst;
-#else
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
+void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
+{
+	const char *src = v_src;
+	char *dst = v_dst;
+	const uint32_t *i_src;
+	uint32_t *i_dst;
+
+	if (!c)
+		return v_dst;
+
+	/* Use memcpy when source is higher than dest */
+	if (v_dst <= v_src)
+		return memcpy(v_dst, v_src, c);
+
 	/* The following code tries to optimize the copy by using unsigned
 	 * alignment. This will work fine if both source and destination are
 	 * aligned on the same boundary. However, if they are aligned on
@@ -169,7 +179,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 			*--dst = *--src;
 	}
 	return v_dst;
-#endif
 }
+#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memmove);
 #endif /* __HAVE_ARCH_MEMMOVE */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index ecfb663e1fc1..834565d1607e 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -31,17 +31,30 @@
 #include
 
 #ifdef __HAVE_ARCH_MEMSET
+#ifndef CONFIG_OPT_LIB_FUNCTION
+void *memset(void *v_src, int c, __kernel_size_t n)
+{
+	char *src = v_src;
+
+	/* Truncate c to 8 bits */
+	c = (c & 0xFF);
+
+	/* Simple, byte oriented memset or the rest of count. */
+	while (n--)
+		*src++ = c;
+
+	return v_src;
+}
+#else /* CONFIG_OPT_LIB_FUNCTION */
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
 	char *src = v_src;
-#ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
 	uint32_t w32 = 0;
-#endif
+
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);
 
-#ifdef CONFIG_OPT_LIB_FUNCTION
 	if (unlikely(c)) {
 		/* Make a repeating word out of it */
 		w32 = c;
@@ -72,12 +85,13 @@ void *memset(void *v_src, int c, __kernel_size_t n)
 		src = (void *)i_src;
 	}
 
-#endif
+
 	/* Simple, byte oriented memset or the rest of count. */
 	while (n--)
 		*src++ = c;
 
 	return v_src;
 }
+#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memset);
 #endif /* __HAVE_ARCH_MEMSET */
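
For context, the CONFIG_OPT_LIB_FUNCTION memset variant that this patch splits out works by replicating the fill byte into a 32-bit word and storing whole words instead of single bytes. The following is a minimal standalone sketch of that idea, not the kernel code: the function name sketch_memset is hypothetical, and the head/tail alignment handling is simplified relative to the patch.

#include <stdint.h>
#include <stddef.h>

void *sketch_memset(void *v_src, int c, size_t n)
{
	char *src = v_src;
	uint32_t *i_src;
	uint32_t w32 = 0;

	c &= 0xFF;		/* truncate the fill value to one byte */
	if (c) {		/* build a repeating word, e.g. 0xabababab */
		w32 = c;
		w32 |= w32 << 8;
		w32 |= w32 << 16;
	}

	/* Fill byte by byte until the destination is word aligned. */
	while (n && ((uintptr_t)src & 3)) {
		*src++ = c;
		n--;
	}

	/* Store whole 32-bit words. */
	i_src = (void *)src;
	while (n >= 4) {
		*i_src++ = w32;
		n -= 4;
	}
	src = (void *)i_src;

	/* Fill whatever remains, byte by byte. */
	while (n--)
		*src++ = c;

	return v_src;
}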