author     Enzo Matsumiya <ematsumiya@suse.de>      2024-09-06 14:41:50 -0300
committer  Steve French <stfrench@microsoft.com>    2024-09-15 10:42:45 -0500
commit     94ae8c3fee94a87bdf982d5559f8037c6c562657
tree       b0038fd3eea4583bd7eba56046baa2849ab959d9 /fs/smb/client/compress
parent     f046d71e84e1e94cf23335129a27f5cfe3e8b75f
smb: client: compress: LZ77 code improvements and cleanup
- Check data compressibility with some heuristics (copied from
  btrfs):
  - should_compress()'s final decision is now made by
    is_compressible(data) (a simplified sketch of the idea follows
    the "Known bugs" note below)
- Clean up compress/lz77.h, leaving only lz77_compress() exposed:
  - Move parts into compress/lz77.c and remove the rest, which was
    either unused, used only once, or implemented incorrectly
    (thanks to David Howells for the help)
- Update the compression parameters (still compatible with the
  Windows implementation), trading off ~20% compression ratio for
  ~40% better performance (see the hash sketch right after this
  list):
  - min match len: 3 -> 4
  - max distance: 8KiB -> 1KiB
  - hash table type: u32 * -> u64 *
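The updated matcher hashes 5 input bytes per position into a 2^15-entry
table. Below is a minimal userspace sketch of that hash, using the same
constant and parameters as the patch; the hash5() wrapper, main(), and
sample input are illustrative only, not part of the change:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define LZ77_HASH_LOG 15	/* 2^15-slot table, as in the patch */

	/*
	 * Same 5-byte hash the patch inlines in its main loop: the << 24
	 * drops the top 3 bytes of the little-endian u64 read, the prime
	 * (889523592379, "prime5bytes" in the old header) spreads the
	 * remaining 5 bytes, and the final shift keeps the top
	 * LZ77_HASH_LOG bits as the table index.
	 */
	static uint32_t hash5(uint64_t v)
	{
		return (uint32_t)(((v << 24) * 889523592379ULL) >> (64 - LZ77_HASH_LOG));
	}

	int main(void)
	{
		const uint8_t buf[] = "abcdeabcdeabcde";
		uint64_t v;

		memcpy(&v, buf, sizeof(v));
		printf("slot %u of %u\n", hash5(v), 1u << LZ77_HASH_LOG);

		return 0;
	}

Hashing 5 bytes (instead of 3, as the removed hash3() did) pairs
naturally with the new minimum match length of 4: short, noisy matches
no longer pollute the table.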
Known bugs:
This implementation currently works fine in general, but breaks with
some payloads used during testing. Investigation is ongoing; a fix
will follow in a later commit.
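For reference, the btrfs heuristic mentioned above boils down to
sampling the input and estimating how close its byte distribution is to
random. A toy userspace sketch of that idea follows; the function name,
sampling stride, and threshold are illustrative, and the real
is_compressible() is outside the files shown in this diff:

	#include <math.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Toy compressibility check: sample every 16th byte, build a
	 * histogram, estimate Shannon entropy, and reject inputs that
	 * look close to random (8 bits/byte). Stride and threshold are
	 * made up for illustration.
	 */
	static bool is_compressible_sketch(const uint8_t *data, size_t len)
	{
		uint32_t hist[256] = { 0 };
		size_t i, samples = 0;
		double entropy = 0.0;

		for (i = 0; i < len; i += 16) {
			hist[data[i]]++;
			samples++;
		}

		if (!samples)
			return false;

		for (i = 0; i < 256; i++) {
			double p;

			if (!hist[i])
				continue;

			p = (double)hist[i] / samples;
			entropy -= p * log2(p);
		}

		/* Below ~7 bits/byte of sampled entropy, compression is
		 * likely to pay off. */
		return entropy < 7.0;
	}

The in-kernel version cannot use floating point, so the real heuristic
works with integer log2 approximations, as btrfs does.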
Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
Co-developed-by: David Howells <dhowells@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Diffstat (limited to 'fs/smb/client/compress')
-rw-r--r--  fs/smb/client/compress/lz77.c  270
-rw-r--r--  fs/smb/client/compress/lz77.h  275
2 files changed, 149 insertions, 396 deletions
diff --git a/fs/smb/client/compress/lz77.c b/fs/smb/client/compress/lz77.c
index 2b8d548f9492..553e253ada29 100644
--- a/fs/smb/client/compress/lz77.c
+++ b/fs/smb/client/compress/lz77.c
@@ -7,14 +7,75 @@
  * Implementation of the LZ77 "plain" compression algorithm, as per MS-XCA spec.
  */
 #include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/count_zeros.h>
+#include <asm/unaligned.h>
+
 #include "lz77.h"
 
-static __always_inline u32 hash3(const u8 *ptr)
+/*
+ * Compression parameters.
+ */
+#define LZ77_MATCH_MIN_LEN	4
+#define LZ77_MATCH_MIN_DIST	1
+#define LZ77_MATCH_MAX_DIST	SZ_1K
+#define LZ77_HASH_LOG		15
+#define LZ77_HASH_SIZE		(1 << LZ77_HASH_LOG)
+#define LZ77_STEP_SIZE		sizeof(u64)
+
+static __always_inline u8 lz77_read8(const u8 *ptr)
+{
+	return get_unaligned(ptr);
+}
+
+static __always_inline u64 lz77_read64(const u64 *ptr)
+{
+	return get_unaligned(ptr);
+}
+
+static __always_inline void lz77_write8(u8 *ptr, u8 v)
+{
+	put_unaligned(v, ptr);
+}
+
+static __always_inline void lz77_write16(u16 *ptr, u16 v)
+{
+	put_unaligned_le16(v, ptr);
+}
+
+static __always_inline void lz77_write32(u32 *ptr, u32 v)
+{
+	put_unaligned_le32(v, ptr);
+}
+
+static __always_inline u32 lz77_match_len(const void *wnd, const void *cur, const void *end)
 {
-	return lz77_hash32(lz77_read32(ptr) & 0xffffff, LZ77_HASH_LOG);
+	const void *start = cur;
+	u64 diff;
+
+	/* Safe for a do/while because otherwise we wouldn't reach here from the main loop. */
+	do {
+		diff = lz77_read64(cur) ^ lz77_read64(wnd);
+		if (!diff) {
+			cur += LZ77_STEP_SIZE;
+			wnd += LZ77_STEP_SIZE;
+
+			continue;
+		}
+
+		/* This computes the number of common bytes in @diff. */
+		cur += count_trailing_zeros(diff) >> 3;
+
+		return (cur - start);
+	} while (likely(cur + LZ77_STEP_SIZE < end));
+
+	while (cur < end && lz77_read8(cur++) == lz77_read8(wnd++))
+		;
+
+	return (cur - start);
 }
 
-static u8 *write_match(u8 *dst, u8 **nib, u32 dist, u32 len)
+static __always_inline void *lz77_write_match(void *dst, void **nib, u32 dist, u32 len)
 {
 	len -= 3;
 	dist--;
@@ -22,6 +83,7 @@ static u8 *write_match(u8 *dst, u8 **nib, u32 dist, u32 len)
 
 	if (len < 7) {
 		lz77_write16(dst, dist + len);
+
 		return dst + 2;
 	}
 
@@ -31,11 +93,13 @@ static u8 *write_match(u8 *dst, u8 **nib, u32 dist, u32 len)
 	len -= 7;
 
 	if (!*nib) {
+		lz77_write8(dst, umin(len, 15));
 		*nib = dst;
-		lz77_write8(dst, min_t(unsigned int, len, 15));
 		dst++;
 	} else {
-		**nib |= min_t(unsigned int, len, 15) << 4;
+		u8 *b = *nib;
+
+		lz77_write8(b, *b | umin(len, 15) << 4);
 		*nib = NULL;
 	}
 
@@ -45,15 +109,16 @@ static u8 *write_match(u8 *dst, u8 **nib, u32 dist, u32 len)
 	len -= 15;
 
 	if (len < 255) {
 		lz77_write8(dst, len);
+
 		return dst + 1;
 	}
 
 	lz77_write8(dst, 0xff);
 	dst++;
-
 	len += 7 + 15;
 	if (len <= 0xffff) {
 		lz77_write16(dst, len);
+
 		return dst + 2;
 	}
 
@@ -64,148 +129,107 @@
 	return dst + 4;
 }
 
-static u8 *write_literals(u8 *dst, const u8 *dst_end, const u8 *src, size_t count,
-			  struct lz77_flags *flags)
+noinline int lz77_compress(const void *src, u32 slen, void *dst, u32 *dlen)
 {
-	const u8 *end = src + count;
-
-	while (src < end) {
-		size_t c = lz77_min(count, 32 - flags->count);
-
-		if (dst + c >= dst_end)
-			return ERR_PTR(-EFAULT);
-
-		if (lz77_copy(dst, src, c))
-			return ERR_PTR(-EFAULT);
-
-		dst += c;
-		src += c;
-		count -= c;
-
-		flags->val <<= c;
-		flags->count += c;
-		if (flags->count == 32) {
-			lz77_write32(flags->pos, flags->val);
-			flags->count = 0;
-			flags->pos = dst;
-			dst += 4;
-		}
-	}
-
-	return dst;
-}
-
-static __always_inline bool is_valid_match(const u32 dist, const u32 len)
-{
-	return (dist >= LZ77_MATCH_MIN_DIST && dist < LZ77_MATCH_MAX_DIST) &&
-	       (len >= LZ77_MATCH_MIN_LEN && len < LZ77_MATCH_MAX_LEN);
-}
-
-static __always_inline const u8 *find_match(u32 *htable, const u8 *base, const u8 *cur,
-					    const u8 *end, u32 *best_len)
-{
-	const u8 *match;
-	u32 hash;
-	size_t offset;
-
-	hash = hash3(cur);
-	offset = cur - base;
-
-	if (htable[hash] >= offset)
-		return cur;
-
-	match = base + htable[hash];
-	*best_len = lz77_match(match, cur, end);
-	if (is_valid_match(cur - match, *best_len))
-		return match;
-
-	return cur;
-}
-
-int lz77_compress(const u8 *src, size_t src_len, u8 *dst, size_t *dst_len)
-{
-	const u8 *srcp, *src_end, *anchor;
-	struct lz77_flags flags = { 0 };
-	u8 *dstp, *dst_end, *nib;
-	u32 *htable;
-	int ret;
+	const void *srcp, *end;
+	void *dstp, *nib, *flag_pos;
+	u32 flag_count = 0;
+	long flag = 0;
+	u64 *htable;
 
 	srcp = src;
-	anchor = srcp;
-	src_end = src + src_len;
-
+	end = src + slen;
 	dstp = dst;
-	dst_end = dst + *dst_len;
-	flags.pos = dstp;
 	nib = NULL;
-
-	memset(dstp, 0, *dst_len);
+	flag_pos = dstp;
 	dstp += 4;
 
-	htable = kvcalloc(LZ77_HASH_SIZE, sizeof(u32), GFP_KERNEL);
+	htable = kvcalloc(LZ77_HASH_SIZE, sizeof(*htable), GFP_KERNEL);
 	if (!htable)
 		return -ENOMEM;
 
-	/* fill hashtable with invalid offsets */
-	memset(htable, 0xff, LZ77_HASH_SIZE * sizeof(u32));
+	/* Main loop. */
+	do {
+		u32 dist, len = 0;
+		const void *wnd;
+		u64 hash;
 
-	/* from here on, any error is because @dst_len reached >= @src_len */
-	ret = -EMSGSIZE;
+		hash = ((lz77_read64(srcp) << 24) * 889523592379ULL) >> (64 - LZ77_HASH_LOG);
+		wnd = src + htable[hash];
+		htable[hash] = srcp - src;
+		dist = srcp - wnd;
 
-	/* main loop */
-	while (srcp < src_end) {
-		u32 hash, dist, len;
-		const u8 *match;
+		if (dist && dist < LZ77_MATCH_MAX_DIST)
+			len = lz77_match_len(wnd, srcp, end);
 
-		while (srcp + 3 < src_end) {
-			len = LZ77_MATCH_MIN_LEN - 1;
-			match = find_match(htable, src, srcp, src_end, &len);
-			hash = hash3(srcp);
-			htable[hash] = srcp - src;
+		if (len < LZ77_MATCH_MIN_LEN) {
+			lz77_write8(dstp, lz77_read8(srcp));
+
+			dstp++;
+			srcp++;
 
-			if (likely(match < srcp)) {
-				dist = srcp - match;
-				break;
+			flag <<= 1;
+			flag_count++;
+			if (flag_count == 32) {
+				lz77_write32(flag_pos, flag);
+				flag_count = 0;
+				flag_pos = dstp;
+				dstp += 4;
 			}
 
-			srcp++;
+			continue;
 		}
 
-		dstp = write_literals(dstp, dst_end, anchor, srcp - anchor, &flags);
-		if (IS_ERR(dstp))
-			goto err_free;
-
-		if (srcp + 3 >= src_end)
-			goto leftovers;
+		/*
+		 * Bail out if @dstp reached >= 7/8 of @slen -- already compressed badly, not worth
+		 * going further.
+		 */
+		if (unlikely(dstp - dst >= slen - (slen >> 3))) {
+			*dlen = slen;
+			goto out;
+		}
 
-		dstp = write_match(dstp, &nib, dist, len);
+		dstp = lz77_write_match(dstp, &nib, dist, len);
 		srcp += len;
-		anchor = srcp;
-
-		flags.val = (flags.val << 1) | 1;
-		flags.count++;
-		if (flags.count == 32) {
-			lz77_write32(flags.pos, flags.val);
-			flags.count = 0;
-			flags.pos = dstp;
+
+		flag = (flag << 1) | 1;
+		flag_count++;
+		if (flag_count == 32) {
+			lz77_write32(flag_pos, flag);
+			flag_count = 0;
+			flag_pos = dstp;
+			dstp += 4;
+		}
+	} while (likely(srcp + LZ77_STEP_SIZE < end));
+
+	while (srcp < end) {
+		u32 c = umin(end - srcp, 32 - flag_count);
+
+		memcpy(dstp, srcp, c);
+
+		dstp += c;
+		srcp += c;
+
+		flag <<= c;
+		flag_count += c;
+		if (flag_count == 32) {
+			lz77_write32(flag_pos, flag);
+			flag_count = 0;
+			flag_pos = dstp;
 			dstp += 4;
 		}
-	}
-
-leftovers:
-	if (srcp < src_end) {
-		dstp = write_literals(dstp, dst_end, srcp, src_end - srcp, &flags);
-		if (IS_ERR(dstp))
-			goto err_free;
 	}
 
-	flags.val <<= (32 - flags.count);
-	flags.val |= (1 << (32 - flags.count)) - 1;
-	lz77_write32(flags.pos, flags.val);
+	flag <<= (32 - flag_count);
+	flag |= (1 << (32 - flag_count)) - 1;
+	lz77_write32(flag_pos, flag);
 
-	*dst_len = dstp - dst;
-	ret = 0;
-err_free:
+	*dlen = dstp - dst;
out:
 	kvfree(htable);
 
-	return ret;
+	if (*dlen < slen)
+		return 0;
+
+	return -EMSGSIZE;
 }
diff --git a/fs/smb/client/compress/lz77.h b/fs/smb/client/compress/lz77.h
index 3d0d3eaa8ffb..cdcb191b48a2 100644
--- a/fs/smb/client/compress/lz77.h
+++ b/fs/smb/client/compress/lz77.h
@@ -4,283 +4,12 @@
  *
  * Authors: Enzo Matsumiya <ematsumiya@suse.de>
  *
- * Definitions and optmized helpers for LZ77 compression.
+ * Implementation of the LZ77 "plain" compression algorithm, as per MS-XCA spec.
  */
 #ifndef _SMB_COMPRESS_LZ77_H
 #define _SMB_COMPRESS_LZ77_H
 
-#include <linux/uaccess.h>
-#ifdef CONFIG_CIFS_COMPRESSION
-#include <asm/ptrace.h>
 #include <linux/kernel.h>
-#include <linux/string.h>
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm-generic/unaligned.h>
-#endif
 
-#define LZ77_HASH_LOG		13
-#define LZ77_HASH_SIZE		(1 << LZ77_HASH_LOG)
-#define LZ77_HASH_MASK		lz77_hash_mask(LZ77_HASH_LOG)
-
-/* We can increase this for better compression (but worse performance). */
-#define LZ77_MATCH_MIN_LEN	3
-/* From MS-XCA, but it's arbitrarily chosen. */
-#define LZ77_MATCH_MAX_LEN	S32_MAX
-/*
- * Check this to ensure we don't match the current position, which would
- * end up doing a verbatim copy of the input, and actually overflowing
- * the output buffer because of the encoded metadata.
- */
-#define LZ77_MATCH_MIN_DIST	1
-/* How far back in the buffer can we try to find a match (i.e. window size) */
-#define LZ77_MATCH_MAX_DIST	8192
-
-#define LZ77_STEPSIZE_16	sizeof(u16)
-#define LZ77_STEPSIZE_32	sizeof(u32)
-#define LZ77_STEPSIZE_64	sizeof(u64)
-
-struct lz77_flags {
-	u8 *pos;
-	size_t count;
-	long val;
-};
-
-static __always_inline u32 lz77_hash_mask(const unsigned int log2)
-{
-	return ((1 << log2) - 1);
-}
-
-static __always_inline u32 lz77_hash64(const u64 v, const unsigned int log2)
-{
-	const u64 prime5bytes = 889523592379ULL;
-
-	return (u32)(((v << 24) * prime5bytes) >> (64 - log2));
-}
-
-static __always_inline u32 lz77_hash32(const u32 v, const unsigned int log2)
-{
-	return ((v * 2654435769LL) >> (32 - log2)) & lz77_hash_mask(log2);
-}
-
-static __always_inline u32 lz77_log2(unsigned int x)
-{
-	return x ? ((u32)(31 - __builtin_clz(x))) : 0;
-}
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-static __always_inline u8 lz77_read8(const void *ptr)
-{
-	return *(u8 *)ptr;
-}
-
-static __always_inline u16 lz77_read16(const void *ptr)
-{
-	return *(u16 *)ptr;
-}
-
-static __always_inline u32 lz77_read32(const void *ptr)
-{
-	return *(u32 *)ptr;
-}
-
-static __always_inline u64 lz77_read64(const void *ptr)
-{
-	return *(u64 *)ptr;
-}
-
-static __always_inline void lz77_write8(void *ptr, const u8 v)
-{
-	*(u8 *)ptr = v;
-}
-
-static __always_inline void lz77_write16(void *ptr, const u16 v)
-{
-	*(u16 *)ptr = v;
-}
-
-static __always_inline void lz77_write32(void *ptr, const u32 v)
-{
-	*(u32 *)ptr = v;
-}
-
-static __always_inline void lz77_write64(void *ptr, const u64 v)
-{
-	*(u64 *)ptr = v;
-}
-
-static __always_inline void lz77_write_ptr16(void *ptr, const void *vp)
-{
-	*(u16 *)ptr = *(const u16 *)vp;
-}
-
-static __always_inline void lz77_write_ptr32(void *ptr, const void *vp)
-{
-	*(u32 *)ptr = *(const u32 *)vp;
-}
-
-static __always_inline void lz77_write_ptr64(void *ptr, const void *vp)
-{
-	*(u64 *)ptr = *(const u64 *)vp;
-}
-
-static __always_inline long lz77_copy(u8 *dst, const u8 *src, size_t count)
-{
-	return copy_from_kernel_nofault(dst, src, count);
-}
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-static __always_inline u8 lz77_read8(const void *ptr)
-{
-	return get_unaligned((u8 *)ptr);
-}
-
-static __always_inline u16 lz77_read16(const void *ptr)
-{
-	return lz77_read8(ptr) | (lz77_read8(ptr + 1) << 8);
-}
-
-static __always_inline u32 lz77_read32(const void *ptr)
-{
-	return lz77_read16(ptr) | (lz77_read16(ptr + 2) << 16);
-}
-
-static __always_inline u64 lz77_read64(const void *ptr)
-{
-	return lz77_read32(ptr) | ((u64)lz77_read32(ptr + 4) << 32);
-}
-
-static __always_inline void lz77_write8(void *ptr, const u8 v)
-{
-	put_unaligned(v, (u8 *)ptr);
-}
-
-static __always_inline void lz77_write16(void *ptr, const u16 v)
-{
-	lz77_write8(ptr, v & 0xff);
-	lz77_write8(ptr + 1, (v >> 8) & 0xff);
-}
-
-static __always_inline void lz77_write32(void *ptr, const u32 v)
-{
-	lz77_write16(ptr, v & 0xffff);
-	lz77_write16(ptr + 2, (v >> 16) & 0xffff);
-}
-
-static __always_inline void lz77_write64(void *ptr, const u64 v)
-{
-	lz77_write32(ptr, v & 0xffffffff);
-	lz77_write32(ptr + 4, (v >> 32) & 0xffffffff);
-}
-
-static __always_inline void lz77_write_ptr16(void *ptr, const void *vp)
-{
-	const u16 v = lz77_read16(vp);
-
-	lz77_write16(ptr, v);
-}
-
-static __always_inline void lz77_write_ptr32(void *ptr, const void *vp)
-{
-	const u32 v = lz77_read32(vp);
-
-	lz77_write32(ptr, v);
-}
-
-static __always_inline void lz77_write_ptr64(void *ptr, const void *vp)
-{
-	const u64 v = lz77_read64(vp);
-
-	lz77_write64(ptr, v);
-}
-static __always_inline long lz77_copy(u8 *dst, const u8 *src, size_t count)
-{
-	memcpy(dst, src, count);
-	return 0;
-}
-#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-static __always_inline unsigned int __count_common_bytes(const unsigned long diff)
-{
-#ifdef __has_builtin
-# if __has_builtin(__builtin_ctzll)
-	return (unsigned int)__builtin_ctzll(diff) >> 3;
-# endif
-#else
-	/* count trailing zeroes */
-	unsigned long bits = 0, i, z = 0;
-
-	bits |= diff;
-	for (i = 0; i < 64; i++) {
-		if (bits[i])
-			break;
-		z++;
-	}
-
-	return (unsigned int)z >> 3;
-#endif
-}
-
-static __always_inline size_t lz77_match(const u8 *match, const u8 *cur, const u8 *end)
-{
-	const u8 *start = cur;
-
-	if (cur == match)
-		return 0;
-
-	if (likely(cur < end - (LZ77_STEPSIZE_64 - 1))) {
-		u64 const diff = lz77_read64(cur) ^ lz77_read64(match);
-
-		if (!diff) {
-			cur += LZ77_STEPSIZE_64;
-			match += LZ77_STEPSIZE_64;
-		} else {
-			return __count_common_bytes(diff);
-		}
-	}
-
-	while (likely(cur < end - (LZ77_STEPSIZE_64 - 1))) {
-		u64 const diff = lz77_read64(cur) ^ lz77_read64(match);
-
-		if (!diff) {
-			cur += LZ77_STEPSIZE_64;
-			match += LZ77_STEPSIZE_64;
-			continue;
-		}
-
-		cur += __count_common_bytes(diff);
-		return (size_t)(cur - start);
-	}
-
-	if (cur < end - 3 && !(lz77_read32(cur) ^ lz77_read32(match))) {
-		cur += LZ77_STEPSIZE_32;
-		match += LZ77_STEPSIZE_32;
-	}
-
-	if (cur < end - 1 && lz77_read16(cur) == lz77_read16(match)) {
-		cur += LZ77_STEPSIZE_16;
-		match += LZ77_STEPSIZE_16;
-	}
-
-	if (cur < end && *cur == *match)
-		cur++;
-
-	return (size_t)(cur - start);
-}
-
-static __always_inline unsigned long lz77_max(unsigned long a, unsigned long b)
-{
-	int m = (a < b) - 1;
-
-	return (a & m) | (b & ~m);
-}
-
-static __always_inline unsigned long lz77_min(unsigned long a, unsigned long b)
-{
-	int m = (a > b) - 1;
-
-	return (a & m) | (b & ~m);
-}
-
-int lz77_compress(const u8 *src, size_t src_len, u8 *dst, size_t *dst_len);
-/* when CONFIG_CIFS_COMPRESSION not set lz77_compress() is not called */
-#endif /* !CONFIG_CIFS_COMPRESSION */
+int lz77_compress(const void *src, u32 slen, void *dst, u32 *dlen);
 #endif /* _SMB_COMPRESS_LZ77_H */
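For completeness, here is a sketch of how a caller might drive the new
entry point. The smb_compress_buf() helper and its buffer sizing are
assumptions, not part of the patch; the return contract (0 with *dlen
set on success, -EMSGSIZE when the output would not be smaller) follows
the code above:

	#include <linux/slab.h>
	#include <linux/types.h>
	#include "lz77.h"

	/* Hypothetical caller: compress @src into a freshly allocated
	 * buffer, or report that compression is not worth it. */
	static int smb_compress_buf(const void *src, u32 slen, void **out, u32 *out_len)
	{
		void *dst;
		u32 dlen = 0;
		int ret;

		/*
		 * Headroom assumption: literals cost 1 byte each plus a
		 * 4-byte flag word per 32 of them, so allow slen + slen/8
		 * plus a little slack rather than relying solely on the
		 * encoder's 7/8 bail-out (which only triggers on the
		 * match path).
		 */
		dst = kvmalloc(slen + (slen >> 3) + 8, GFP_KERNEL);
		if (!dst)
			return -ENOMEM;

		ret = lz77_compress(src, slen, dst, &dlen);
		if (ret) {
			/* -EMSGSIZE: incompressible; send data as-is. */
			kvfree(dst);
			return ret;
		}

		*out = dst;
		*out_len = dlen;

		return 0;
	}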