author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-28 17:43:29 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-28 17:43:29 -0700 |
commit | a578dd095dfe8b56c167201d9aea43e47d27f807 (patch) | |
tree | 6d4ee6e286b92ebad6d10572af74eb15fd31973f /lib/crc32.c | |
parent | 8e736a2eeaf261213b4557778e015699da1e1c8c (diff) | |
parent | 118da22eb6fbd48f896d17411f942399283d600c (diff) | |
Merge tag 'crc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull CRC updates from Eric Biggers:
- Reorganize the architecture-optimized CRC code
It now lives in lib/crc/$(SRCARCH)/ rather than arch/$(SRCARCH)/lib/,
and it is no longer artificially split into separate generic and arch
modules. This allows better inlining and dead code elimination.
The generic CRC code is also no longer exported, simplifying the API.
(This mirrors the similar changes to SHA-1 and SHA-2 in lib/crypto/,
which can be found in the "Crypto library updates" pull request.)
- Improve crc32c() performance on newer x86_64 CPUs on long messages by
enabling the VPCLMULQDQ optimized code
- Simplify the crypto_shash wrappers for crc32_le() and crc32c()
Register just one shash algorithm for each that uses the (fully
optimized) library functions, instead of unnecessarily providing
direct access to the generic CRC code
- Remove unused and obsolete drivers for hardware CRC engines
- Remove CRC-32 combination functions that are no longer used
- Add kerneldoc for crc32_le(), crc32_be(), and crc32c()
- Convert the crc32() macro to an inline function (a before/after sketch follows below)
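
To illustrate the last item, here is a rough before/after sketch. It is not a quote of the patch, and the exact definitions in <linux/crc32.h> may differ; the kernel's u32 type, size_t, and the crc32_le() library declaration are assumed to be in scope.

```c
/* Illustrative sketch only -- not the literal <linux/crc32.h> change. */

#if 0
/* Before: a macro that forwards to crc32_le() and casts the data pointer. */
#define crc32(seed, data, length) \
	crc32_le(seed, (const unsigned char *)(data), length)
#endif

/* After: a static inline wrapper.  Arguments are type-checked, the function
 * can carry kerneldoc, and the const void * parameter removes the need for
 * the caller-side cast. */
static inline u32 crc32(u32 crc, const void *p, size_t len)
{
	return crc32_le(crc, p, len);
}
```

Callers are unaffected either way; the practical difference is that the inline function gives proper type checking and evaluates its arguments exactly once.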
* tag 'crc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (26 commits)
lib/crc: x86/crc32c: Enable VPCLMULQDQ optimization where beneficial
lib/crc: x86: Reorganize crc-pclmul static_call initialization
lib/crc: crc64: Add include/linux/crc64.h to kernel-api.rst
lib/crc: crc32: Change crc32() from macro to inline function and remove cast
nvmem: layouts: Switch from crc32() to crc32_le()
lib/crc: crc32: Document crc32_le(), crc32_be(), and crc32c()
lib/crc: Explicitly include <linux/export.h>
lib/crc: Remove ARCH_HAS_* kconfig symbols
lib/crc: x86: Migrate optimized CRC code into lib/crc/
lib/crc: sparc: Migrate optimized CRC code into lib/crc/
lib/crc: s390: Migrate optimized CRC code into lib/crc/
lib/crc: riscv: Migrate optimized CRC code into lib/crc/
lib/crc: powerpc: Migrate optimized CRC code into lib/crc/
lib/crc: mips: Migrate optimized CRC code into lib/crc/
lib/crc: loongarch: Migrate optimized CRC code into lib/crc/
lib/crc: arm64: Migrate optimized CRC code into lib/crc/
lib/crc: arm: Migrate optimized CRC code into lib/crc/
lib/crc: Prepare for arch-optimized code in subdirs of lib/crc/
lib/crc: Move files into lib/crc/
lib/crc32: Remove unused combination support
...
Diffstat (limited to 'lib/crc32.c')
-rw-r--r-- | lib/crc32.c | 126 |
1 files changed, 0 insertions, 126 deletions
diff --git a/lib/crc32.c b/lib/crc32.c
deleted file mode 100644
index 95429861d3ac..000000000000
--- a/lib/crc32.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
- * cleaned up code to current version of sparse and added the slicing-by-8
- * algorithm to the closely similar existing slicing-by-4 algorithm.
- *
- * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
- * Code was from the public domain, copyright abandoned. Code was
- * subsequently included in the kernel, thus was re-licensed under the
- * GNU GPL v2.
- *
- * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Same crc32 function was used in 5 other places in the kernel.
- * I made one version, and deleted the others.
- * There are various incantations of crc32(). Some use a seed of 0 or ~0.
- * Some xor at the end with ~0. The generic crc32() function takes
- * seed as an argument, and doesn't xor at the end. Then individual
- * users can do whatever they need.
- *   drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
- *   fs/jffs2 uses seed 0, doesn't xor with ~0.
- *   fs/partitions/efi.c uses seed ~0, xor's with ~0.
- */
-
-/* see: Documentation/staging/crc32.rst for a description of algorithms */
-
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "crc32table.h"
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
-MODULE_DESCRIPTION("Various CRC32 calculations");
-MODULE_LICENSE("GPL");
-
-u32 crc32_le_base(u32 crc, const u8 *p, size_t len)
-{
-	while (len--)
-		crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
-	return crc;
-}
-EXPORT_SYMBOL(crc32_le_base);
-
-u32 crc32c_base(u32 crc, const u8 *p, size_t len)
-{
-	while (len--)
-		crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
-	return crc;
-}
-EXPORT_SYMBOL(crc32c_base);
-
-/*
- * This multiplies the polynomials x and y modulo the given modulus.
- * This follows the "little-endian" CRC convention that the lsbit
- * represents the highest power of x, and the msbit represents x^0.
- */
-static u32 gf2_multiply(u32 x, u32 y, u32 modulus)
-{
-	u32 product = x & 1 ? y : 0;
-	int i;
-
-	for (i = 0; i < 31; i++) {
-		product = (product >> 1) ^ (product & 1 ? modulus : 0);
-		x >>= 1;
-		product ^= x & 1 ? y : 0;
-	}
-
-	return product;
-}
-
-/**
- * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
- * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
- * @len: The number of bytes. @crc is multiplied by x^(8*@len)
- * @polynomial: The modulus used to reduce the result to 32 bits.
- *
- * It's possible to parallelize CRC computations by computing a CRC
- * over separate ranges of a buffer, then summing them.
- * This shifts the given CRC by 8*len bits (i.e. produces the same effect
- * as appending len bytes of zero to the data), in time proportional
- * to log(len).
- */
-static u32 crc32_generic_shift(u32 crc, size_t len, u32 polynomial)
-{
-	u32 power = polynomial;	/* CRC of x^32 */
-	int i;
-
-	/* Shift up to 32 bits in the simple linear way */
-	for (i = 0; i < 8 * (int)(len & 3); i++)
-		crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
-
-	len >>= 2;
-	if (!len)
-		return crc;
-
-	for (;;) {
-		/* "power" is x^(2^i), modulo the polynomial */
-		if (len & 1)
-			crc = gf2_multiply(crc, power, polynomial);
-
-		len >>= 1;
-		if (!len)
-			break;
-
-		/* Square power, advancing to x^(2^(i+1)) */
-		power = gf2_multiply(power, power, polynomial);
-	}
-
-	return crc;
-}
-
-u32 crc32_le_shift(u32 crc, size_t len)
-{
-	return crc32_generic_shift(crc, len, CRC32_POLY_LE);
-}
-EXPORT_SYMBOL(crc32_le_shift);
-
-u32 crc32_be_base(u32 crc, const u8 *p, size_t len)
-{
-	while (len--)
-		crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
-	return crc;
-}
-EXPORT_SYMBOL(crc32_be_base);
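
The kerneldoc for crc32_generic_shift() above notes that CRCs over separate ranges of a buffer can be computed independently and then combined. Below is a small userspace sketch (not kernel code) demonstrating that identity. To stay self-contained it reimplements a bit-at-a-time little-endian CRC-32 that computes the same function as crc32_le(), and a simple linear-time shift where the kernel helper is logarithmic.

```c
/*
 * Userspace sketch only -- not kernel code.  Demonstrates the combination
 * identity described in the crc32_generic_shift() kerneldoc: the CRC of a
 * whole buffer equals the CRC of the first part, shifted past the second
 * part, XORed with the CRC of the second part computed from a zero seed.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_LE 0xedb88320u	/* same polynomial the kernel uses */

/* Bit-at-a-time little-endian CRC-32; computes the same function as the
 * kernel's crc32_le(), just without the lookup tables. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? CRC32_POLY_LE : 0);
	}
	return crc;
}

/* Multiply crc by x^(8*len) mod the polynomial, i.e. "append len zero
 * bytes".  Linear time here; the kernel helper does the same job in
 * log(len) steps via gf2_multiply(). */
static uint32_t crc32_le_shift(uint32_t crc, size_t len)
{
	for (size_t i = 0; i < 8 * len; i++)
		crc = (crc >> 1) ^ (crc & 1 ? CRC32_POLY_LE : 0);
	return crc;
}

int main(void)
{
	const uint8_t data[] = "parallel CRC combination example";
	const size_t total = sizeof(data) - 1;	/* exclude the NUL */
	const size_t len1 = 10, len2 = total - len1;

	/* One pass over the whole buffer. */
	uint32_t whole = crc32_le(~0u, data, total);

	/* Two independent passes: the real seed for the first part, a zero
	 * seed for the second so the results can be combined afterwards. */
	uint32_t part1 = crc32_le(~0u, data, len1);
	uint32_t part2 = crc32_le(0, data + len1, len2);

	/* Combine: shift part1 past the second range, then XOR in part2. */
	uint32_t combined = crc32_le_shift(part1, len2) ^ part2;

	assert(combined == whole);
	printf("whole=%08x combined=%08x\n", (unsigned)whole, (unsigned)combined);
	return 0;
}
```

This identity is exactly what the kernel's combination helpers, removed in this series as unused, were built on.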