Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                       |   30
-rw-r--r--  lib/Kconfig.debug                 |  100
-rw-r--r--  lib/Makefile                      |   11
-rw-r--r--  lib/asn1_decoder.c                |    2
-rw-r--r--  lib/atomic64_test.c               |   13
-rw-r--r--  lib/audit.c                       |   15
-rw-r--r--  lib/btree.c                       |    1
-rw-r--r--  lib/bug.c                         |   21
-rw-r--r--  lib/clz_ctz.c                     |    7
-rw-r--r--  lib/compat_audit.c                |   50
-rw-r--r--  lib/cpumask.c                     |   63
-rw-r--r--  lib/crc32.c                       |  157
-rw-r--r--  lib/crc7.c                        |   84
-rw-r--r--  lib/debugobjects.c                |   19
-rw-r--r--  lib/decompress.c                  |    3
-rw-r--r--  lib/decompress_inflate.c          |    1
-rw-r--r--  lib/devres.c                      |   48
-rw-r--r--  lib/digsig.c                      |    5
-rw-r--r--  lib/dump_stack.c                  |    4
-rw-r--r--  lib/dynamic_debug.c               |    8
-rw-r--r--  lib/fdt_empty_tree.c              |    2
-rw-r--r--  lib/idr.c                         |   64
-rw-r--r--  lib/interval_tree.c               |    6
-rw-r--r--  lib/interval_tree_test.c (renamed from lib/interval_tree_test_main.c) | 0
-rw-r--r--  lib/iomap.c                       |    4
-rw-r--r--  lib/iovec.c                       |   59
-rw-r--r--  lib/kobject_uevent.c              |   48
-rw-r--r--  lib/libcrc32c.c                   |    5
-rw-r--r--  lib/lockref.c                     |    3
-rw-r--r--  lib/lz4/lz4_decompress.c          |   12
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c   |   62
-rw-r--r--  lib/net_utils.c                   |   10
-rw-r--r--  lib/nlattr.c                      |   31
-rw-r--r--  lib/percpu-refcount.c             |   86
-rw-r--r--  lib/percpu_counter.c              |    2
-rw-r--r--  lib/plist.c                       |   56
-rw-r--r--  lib/radix-tree.c                  |  396
-rw-r--r--  lib/random32.c                    |  125
-rw-r--r--  lib/rhashtable.c                  |  797
-rw-r--r--  lib/scatterlist.c                 |   25
-rw-r--r--  lib/smp_processor_id.c            |   18
-rw-r--r--  lib/string.c                      |   26
-rw-r--r--  lib/swiotlb.c                     |   30
-rw-r--r--  lib/syscall.c                     |    1
-rw-r--r--  lib/test_bpf.c                    | 1929
-rw-r--r--  lib/test_firmware.c               |  117
-rw-r--r--  lib/textsearch.c                  |    9
-rw-r--r--  lib/vsprintf.c                    |   49
-rw-r--r--  lib/xz/Kconfig                    |   24
-rw-r--r--  lib/xz/xz_dec_lzma2.c             |    4
50 files changed, 3933 insertions, 709 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 991c98bc4a3f..a8a775730c09 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,15 @@ config AUDIT_GENERIC
depends on AUDIT && !AUDIT_ARCH
default y
+config AUDIT_ARCH_COMPAT_GENERIC
+ bool
+ default n
+
+config AUDIT_COMPAT_GENERIC
+ bool
+ depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+ default y
+
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
default n
@@ -322,6 +331,20 @@ config TEXTSEARCH_FSM
config BTREE
boolean
+config INTERVAL_TREE
+ boolean
+ help
+ Simple, embeddable, interval-tree. Can find the start of an
+ overlapping range in log(n) time and then iterate over all
+ overlapping nodes. The algorithm is implemented as an
+ augmented rbtree.
+
+ See:
+
+ Documentation/rbtree.txt
+
+ for more information.
+
config ASSOCIATIVE_ARRAY
bool
help
@@ -342,9 +365,9 @@ config HAS_IOMEM
select GENERIC_IO
default y
-config HAS_IOPORT
+config HAS_IOPORT_MAP
boolean
- depends on HAS_IOMEM && !NO_IOPORT
+ depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
config HAS_DMA
@@ -428,7 +451,8 @@ config MPILIB
config SIGNATURE
tristate
- depends on KEYS && CRYPTO
+ depends on KEYS
+ select CRYPTO
select CRYPTO_SHA1
select MPILIB
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fc77167a7e43..cfe7df8f62cc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -501,12 +501,21 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VM_VMACACHE
+ bool "Debug VMA caching"
+ depends on DEBUG_VM
+ help
+ Enable this to turn on VMA caching debug information. Doing so
+ can cause significant overhead, so only enable it in non-production
+ environments.
+
+ If unsure, say N.
+
config DEBUG_VM_RB
bool "Debug VM red-black trees"
depends on DEBUG_VM
help
- Enable this to turn on more extended checks in the virtual-memory
- system that may impact performance.
+ Enable VM red-black tree debugging information and extra validations.
If unsure, say N.
@@ -824,14 +833,9 @@ config DEBUG_RT_MUTEXES
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
-config DEBUG_PI_LIST
- bool
- default y
- depends on DEBUG_RT_MUTEXES
-
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
- depends on DEBUG_KERNEL && RT_MUTEXES
+ depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
help
This option enables a rt-mutex tester.
@@ -926,7 +930,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
select KALLSYMS
select KALLSYMS_ALL
@@ -1045,22 +1049,22 @@ config DEBUG_BUGVERBOSE
of the BUG call as well as the EIP and oops trace. This aids
debugging but costs about 70-100K of memory.
-config DEBUG_WRITECOUNT
- bool "Debug filesystem writers count"
+config DEBUG_LIST
+ bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
help
- Enable this to catch wrong use of the writers count in struct
- vfsmount. This will increase the size of each file struct by
- 32 bits.
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
If unsure, say N.
-config DEBUG_LIST
- bool "Debug linked list manipulation"
+config DEBUG_PI_LIST
+ bool "Debug priority linked list manipulation"
depends on DEBUG_KERNEL
help
- Enable this to turn on extended checks in the linked-list
- walking routines.
+ Enable this to turn on extended checks in the priority-ordered
+ linked-list (plist) walking routines. This checks the entire
+ list multiple times during each manipulation.
If unsure, say N.
@@ -1127,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
Say N if you are unsure.
-config PROVE_RCU_DELAY
- bool "RCU debugging: preemptible RCU race provocation"
- depends on DEBUG_KERNEL && PREEMPT_RCU
- default n
- help
- There is a class of races that involve an unlikely preemption
- of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
- been set to INT_MIN. This feature inserts a delay at that
- point to increase the probability of these races.
-
- Say Y to increase probability of preemption of __rcu_read_unlock().
-
- Say N if you are unsure.
-
config SPARSE_RCU_POINTER
bool "RCU debugging: sparse-based checks for pointer usage"
default n
@@ -1404,7 +1394,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
help
Provide stacktrace filter for fault-injection capabilities
@@ -1507,6 +1497,7 @@ config RBTREE_TEST
config INTERVAL_TREE_TEST
tristate "Interval tree test"
depends on m && DEBUG_KERNEL
+ select INTERVAL_TREE
help
A benchmark measuring the performance of the interval tree library
@@ -1545,6 +1536,14 @@ config TEST_STRING_HELPERS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
+config TEST_RHASHTABLE
+ bool "Perform selftest on resizable hash table"
+ default n
+ help
+ Enable this option to test the rhashtable functions at boot.
+
+ If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
@@ -1631,6 +1630,41 @@ config TEST_USER_COPY
If unsure, say N.
+config TEST_BPF
+ tristate "Test BPF filter functionality"
+ default n
+ depends on m && NET
+ help
+ This builds the "test_bpf" module that runs various test vectors
+ against the BPF interpreter or BPF JIT compiler depending on the
+ current setting. This is in particular useful for BPF JIT compiler
+ development, but also to run regression tests against changes in
+ the interpreter code.
+
+ If unsure, say N.
+
+config TEST_FIRMWARE
+ tristate "Test firmware loading via userspace interface"
+ default n
+ depends on FW_LOADER
+ help
+ This builds the "test_firmware" module that creates a userspace
+ interface for testing firmware loading. This can be used to
+ control the triggering of firmware loading without needing an
+ actual firmware-using device. The contents can be rechecked by
+ userspace.
+
+ If unsure, say N.
+
+config TEST_UDELAY
+ tristate "udelay test driver"
+ default n
+ help
+ This builds the "udelay_test" module that helps to make sure
+ that udelay() is working properly.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 48140e3ba73f..8427df95dade 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,13 +26,15 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o hash.o
+ percpu-refcount.o percpu_ida.o hash.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_MODULE) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -50,6 +52,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -96,6 +99,7 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
@@ -147,7 +151,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
-libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
+ fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
@@ -155,8 +160,6 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
-interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
-
obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 11b9b01fda6b..1a000bb050f9 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -140,7 +140,7 @@ error:
* @decoder: The decoder definition (produced by asn1_compiler)
* @context: The caller's context (to be passed to the action functions)
* @data: The encoded data
- * @datasize: The size of the encoded data
+ * @datalen: The size of the encoded data
*
* Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
* produced by asn1_compiler. Action functions are called on marked tags to
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 00bca223d1e1..0211d30d8c39 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -8,6 +8,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/kernel.h>
@@ -146,18 +149,18 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
#ifdef CONFIG_X86
- printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+ pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
- "x86-64",
+ "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
- "i586+",
+ "i586+",
#else
- "i386+",
+ "i386+",
#endif
boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
- printk(KERN_INFO "atomic64 test passed\n");
+ pr_info("passed\n");
#endif
return 0;
diff --git a/lib/audit.c b/lib/audit.c
index 76bbed4a20e5..1d726a22565b 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -30,11 +30,17 @@ static unsigned signal_class[] = {
int audit_classify_arch(int arch)
{
- return 0;
+ if (audit_is_compat(arch))
+ return 1;
+ else
+ return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
+ if (audit_is_compat(abi))
+ return audit_classify_compat_syscall(abi, syscall);
+
switch(syscall) {
#ifdef __NR_open
case __NR_open:
@@ -57,6 +63,13 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+ audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/lib/btree.c b/lib/btree.c
index f9a484676cb6..4264871ea1a0 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
+ mempool_free(head->node, head->mempool);
mempool_destroy(head->mempool);
head->mempool = NULL;
}
diff --git a/lib/bug.c b/lib/bug.c
index 168603477f02..d1d7c7878900 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,9 @@
Jeremy Fitzhardinge <jeremy@goop.org> 2006
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -153,15 +156,13 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
- printk(KERN_WARNING "------------[ cut here ]------------\n");
+ pr_warn("------------[ cut here ]------------\n");
if (file)
- printk(KERN_WARNING "WARNING: at %s:%u\n",
- file, line);
+ pr_warn("WARNING: at %s:%u\n", file, line);
else
- printk(KERN_WARNING "WARNING: at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
print_modules();
show_regs(regs);
@@ -174,12 +175,10 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
printk(KERN_DEFAULT "------------[ cut here ]------------\n");
if (file)
- printk(KERN_CRIT "kernel BUG at %s:%u!\n",
- file, line);
+ pr_crit("kernel BUG at %s:%u!\n", file, line);
else
- printk(KERN_CRIT "Kernel BUG at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index a8f8379eb49f..2e11e48446ab 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -6,6 +6,9 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ * The functions in this file aren't called directly, but are required by
+ * GCC builtins such as __builtin_ctz, and therefore they can't be removed
+ * despite appearing unreferenced in kernel source.
*
* __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
*/
@@ -13,18 +16,22 @@
#include <linux/export.h>
#include <linux/kernel.h>
+int __weak __ctzsi2(int val);
int __weak __ctzsi2(int val)
{
return __ffs(val);
}
EXPORT_SYMBOL(__ctzsi2);
+int __weak __clzsi2(int val);
int __weak __clzsi2(int val)
{
return 32 - fls(val);
}
EXPORT_SYMBOL(__clzsi2);
+int __weak __clzdi2(long val);
+int __weak __ctzdi2(long val);
#if BITS_PER_LONG == 32
int __weak __clzdi2(long val)
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 000000000000..873f75b640ab
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,50 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+ switch (syscall) {
+#ifdef __NR_open
+ case __NR_open:
+ return 2;
+#endif
+#ifdef __NR_openat
+ case __NR_openat:
+ return 3;
+#endif
+#ifdef __NR_socketcall
+ case __NR_socketcall:
+ return 4;
+#endif
+ case __NR_execve:
+ return 5;
+ default:
+ return 1;
+ }
+}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b810b753c607..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -164,3 +164,66 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
memblock_free_early(__pa(mask), cpumask_size());
}
#endif
+
+/**
+ * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
+ *
+ * @i: index number
+ * @numa_node: local numa_node
+ * @dstp: cpumask with the relevant cpu bit set according to the policy
+ *
+ * This function sets the cpumask according to a numa aware policy.
+ * cpumask could be used as an affinity hint for the IRQ related to a
+ * queue. When the policy is to spread queues across cores - local cores
+ * first.
+ *
+ * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
+ * the cpu bit and need to re-call the function.
+ */
+int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+{
+ cpumask_var_t mask;
+ int cpu;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ i %= num_online_cpus();
+
+ if (numa_node == -1 || !cpumask_of_node(numa_node)) {
+ /* Use all online cpu's for non numa aware system */
+ cpumask_copy(mask, cpu_online_mask);
+ } else {
+ int n;
+
+ cpumask_and(mask,
+ cpumask_of_node(numa_node), cpu_online_mask);
+
+ n = cpumask_weight(mask);
+ if (i >= n) {
+ i -= n;
+
+ /* If index > number of local cpu's, mask out local
+ * cpu's
+ */
+ cpumask_andnot(mask, cpu_online_mask, mask);
+ }
+ }
+
+ for_each_cpu(cpu, mask) {
+ if (--i < 0)
+ goto out;
+ }
+
+ ret = -EAGAIN;
+
+out:
+ free_cpumask_var(mask);
+
+ if (!ret)
+ cpumask_set_cpu(cpu, dstp);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpumask_set_cpu_local_first);
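cpumask_set_cpu_local_first() is meant to feed NUMA-aware IRQ affinity hints for multi-queue devices. A minimal sketch, assuming a hypothetical per-queue structure and dev_to_node()/irq_set_affinity_hint() wiring that is not part of this patch:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>

struct example_rx_queue {
        int             irq;
        cpumask_t       affinity_mask;  /* must outlive the affinity hint */
};

static int example_queue_affinity(struct device *dev, int queue_idx,
                                  struct example_rx_queue *q)
{
        int err;

        cpumask_clear(&q->affinity_mask);
        err = cpumask_set_cpu_local_first(queue_idx, dev_to_node(dev),
                                          &q->affinity_mask);
        if (err == -EAGAIN)     /* documented: re-call the function */
                err = cpumask_set_cpu_local_first(queue_idx, dev_to_node(dev),
                                                  &q->affinity_mask);
        if (err)
                return err;

        return irq_set_affinity_hint(q->irq, &q->affinity_mask);
}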
diff --git a/lib/crc32.c b/lib/crc32.c
index 70f00ca5ef1e..9a907d489d95 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -33,13 +33,13 @@
#include "crc32defs.h"
#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) __constant_cpu_to_le32(x))
+# define tole(x) ((__force u32) cpu_to_le32(x))
#else
# define tole(x) (x)
#endif
#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) __constant_cpu_to_be32(x))
+# define tobe(x) ((__force u32) cpu_to_be32(x))
#else
# define tobe(x) (x)
#endif
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#define GF2_DIM 32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
- u32 sum = 0;
-
- while (vec) {
- if (vec & 1)
- sum ^= *mat;
- vec >>= 1;
- mat++;
- }
-
- return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
- int i;
-
- for (i = 0; i < GF2_DIM; i++)
- square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
}
#endif
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
- u32 polynomial)
-{
- u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
- u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */
- u32 row;
- int i;
-
- if (len2 <= 0)
- return crc1;
-
- /* Put operator for one zero bit in odd */
- odd[0] = polynomial;
- row = 1;
- for (i = 1; i < GF2_DIM; i++) {
- odd[i] = row;
- row <<= 1;
- }
-
- gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
- gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
- /* Apply len2 zeros to crc1 (first square will put the operator for one
- * zero byte, eight zero bits, in even).
- */
- do {
- /* Apply zeros operator for this bit of len2 */
- gf2_matrix_square(even, odd);
- if (len2 & 1)
- crc1 = gf2_matrix_times(even, crc1);
- len2 >>= 1;
- /* If no more bits set, then done */
- if (len2 == 0)
- break;
- /* Another iteration of the loop with odd and even swapped */
- gf2_matrix_square(odd, even);
- if (len2 & 1)
- crc1 = gf2_matrix_times(odd, crc1);
- len2 >>= 1;
- } while (len2 != 0);
-
- crc1 ^= crc2;
- return crc1;
-}
/**
* crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
{
- return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+ u32 product = x & 1 ? y : 0;
+ int i;
+
+ for (i = 0; i < 31; i++) {
+ product = (product >> 1) ^ (product & 1 ? modulus : 0);
+ x >>= 1;
+ product ^= x & 1 ? y : 0;
+ }
+
+ return product;
}
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+ u32 polynomial)
{
- return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+ u32 power = polynomial; /* CRC of x^32 */
+ int i;
+
+ /* Shift up to 32 bits in the simple linear way */
+ for (i = 0; i < 8 * (int)(len & 3); i++)
+ crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+ len >>= 2;
+ if (!len)
+ return crc;
+
+ for (;;) {
+ /* "power" is x^(2^i), modulo the polynomial */
+ if (len & 1)
+ crc = gf2_multiply(crc, power, polynomial);
+
+ len >>= 1;
+ if (!len)
+ break;
+
+ /* Square power, advancing to x^(2^(i+1)) */
+ power = gf2_multiply(power, power, polynomial);
+ }
+
+ return crc;
}
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
/**
* crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
#ifdef CONFIG_CRC32_SELFTEST
/* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
{
0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
u32 crc_le; /* expected crc32_le result */
u32 crc_be; /* expected crc32_be result */
u32 crc32c_le; /* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
{
{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
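The new shift helpers exist so that CRCs computed over separate chunks can be combined: shifting a CRC by len bytes is the same as appending len zero bytes, so the combined CRC is the shifted first CRC xor'd with the second. A minimal sketch with an illustrative wrapper name; only crc32_le() and crc32_le_shift() are kernel APIs:

#include <linux/crc32.h>
#include <linux/types.h>

/* CRC of A||B from CRCs of A and B computed independently. */
static u32 example_crc_of_concat(const u8 *a, size_t alen,
                                 const u8 *b, size_t blen)
{
        u32 crc_a = crc32_le(~0, a, alen);      /* e.g. on one CPU */
        u32 crc_b = crc32_le(0, b, blen);       /* e.g. on another */

        /* Shifting crc_a by blen bytes == appending blen zero bytes to A */
        return crc32_le_shift(crc_a, blen) ^ crc_b;
}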
diff --git a/lib/crc7.c b/lib/crc7.c
index f1c3a144cec1..bf6255e23919 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -10,42 +10,47 @@
#include <linux/crc7.h>
-/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
-const u8 crc7_syndrome_table[256] = {
- 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
- 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
- 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
- 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
- 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
- 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
- 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
- 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
- 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
- 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
- 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
- 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
- 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
- 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
- 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
- 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
- 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
- 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
- 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
- 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
- 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
- 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
- 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
- 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
- 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
- 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
- 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
- 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
- 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
- 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
- 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
- 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+/*
+ * Table for CRC-7 (polynomial x^7 + x^3 + 1).
+ * This is a big-endian CRC (msbit is highest power of x),
+ * aligned so the msbit of the byte is the x^6 coefficient
+ * and the lsbit is not used.
+ */
+const u8 crc7_be_syndrome_table[256] = {
+ 0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
+ 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
+ 0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
+ 0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
+ 0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
+ 0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
+ 0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
+ 0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
+ 0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
+ 0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
+ 0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
+ 0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
+ 0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
+ 0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
+ 0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
+ 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
+ 0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
+ 0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
+ 0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
+ 0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
+ 0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
+ 0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
+ 0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
+ 0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
+ 0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
+ 0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
+ 0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
+ 0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
+ 0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
+ 0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
+ 0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
+ 0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
-EXPORT_SYMBOL(crc7_syndrome_table);
+EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7 - update the CRC7 for the data buffer
@@ -55,14 +60,17 @@ EXPORT_SYMBOL(crc7_syndrome_table);
* Context: any
*
* Returns the updated CRC7 value.
+ * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
+ * makes the computation easier, and all callers want it in that form.
+ *
*/
-u8 crc7(u8 crc, const u8 *buffer, size_t len)
+u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
- crc = crc7_byte(crc, *buffer++);
+ crc = crc7_be_byte(crc, *buffer++);
return crc;
}
-EXPORT_SYMBOL(crc7);
+EXPORT_SYMBOL(crc7_be);
MODULE_DESCRIPTION("CRC7 calculations");
MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e0731c3db706..547f7f923dbc 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -7,6 +7,9 @@
*
* For licencing details see kernel-base/COPYING
*/
+
+#define pr_fmt(fmt) "ODEBUG: " fmt
+
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -218,7 +221,7 @@ static void debug_objects_oom(void)
unsigned long flags;
int i;
- printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+ pr_warn("Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
raw_spin_lock_irqsave(&db->lock, flags);
@@ -292,11 +295,9 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- printk(KERN_WARNING
- "ODEBUG: object is on stack, but not annotated\n");
+ pr_warn("object is on stack, but not annotated\n");
else
- printk(KERN_WARNING
- "ODEBUG: object is not on stack, but annotated\n");
+ pr_warn("object is not on stack, but annotated\n");
WARN_ON(1);
}
@@ -985,7 +986,7 @@ static void __init debug_objects_selftest(void)
if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
goto out;
#endif
- printk(KERN_INFO "ODEBUG: selftest passed\n");
+ pr_info("selftest passed\n");
out:
debug_objects_fixups = oldfixups;
@@ -1060,8 +1061,8 @@ static int __init debug_objects_replace_static_objects(void)
}
local_irq_enable();
- printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
- obj_pool_used);
+ pr_debug("%d of %d active objects replaced\n",
+ cnt, obj_pool_used);
return 0;
free:
hlist_for_each_entry_safe(obj, tmp, &objects, node) {
@@ -1090,7 +1091,7 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
if (obj_cache)
kmem_cache_destroy(obj_cache);
- printk(KERN_WARNING "ODEBUG: out of memory.\n");
+ pr_warn("out of memory.\n");
} else
debug_objects_selftest();
}
diff --git a/lib/decompress.c b/lib/decompress.c
index 4d1cd0397aab..86069d74c062 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/printk.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
@@ -61,6 +62,8 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
if (len < 2)
return NULL; /* Need at least this much... */
+ pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
+
for (cf = compressed_formats; cf->name; cf++) {
if (!memcmp(inbuf, cf->magic, 2))
break;
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d619b28c456f..0edfd742a154 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,6 +19,7 @@
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
+#include <linux/decompress/inflate.h>
#endif /* STATIC */
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa0..f4a195a6efe4 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
void devm_iounmap(struct device *dev, void __iomem *addr)
{
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
- (void *)addr));
+ (__force void *)addr));
iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
@@ -114,7 +114,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!res || resource_type(res) != IORESOURCE_MEM) {
dev_err(dev, "invalid resource\n");
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
size = resource_size(res);
@@ -122,7 +122,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!devm_request_mem_region(dev, res->start, size, name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
- return ERR_PTR(-EBUSY);
+ return IOMEM_ERR_PTR(-EBUSY);
}
if (res->flags & IORESOURCE_CACHEABLE)
@@ -133,42 +133,14 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
- dest_ptr = ERR_PTR(-ENOMEM);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
}
return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);
-/**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
- * @res: resource to be handled
- *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_request_and_ioremap(&pdev->dev, res);
- * if (!base)
- * return -EADDRNOTAVAIL;
- */
-void __iomem *devm_request_and_ioremap(struct device *device,
- struct resource *res)
-{
- void __iomem *dest_ptr;
-
- dest_ptr = devm_ioremap_resource(device, res);
- if (IS_ERR(dest_ptr))
- return NULL;
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_request_and_ioremap);
-
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
*/
@@ -192,7 +164,7 @@ static int devm_ioport_map_match(struct device *dev, void *res,
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
@@ -224,10 +196,10 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
ioport_unmap(addr);
WARN_ON(devres_destroy(dev, devm_ioport_map_release,
- devm_ioport_map_match, (void *)addr));
+ devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/*
@@ -263,7 +235,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* be safely called without context and guaranteed to succed once
* allocated.
*/
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr, *new_dr;
@@ -288,7 +260,7 @@ EXPORT_SYMBOL(pcim_iomap_table);
* Managed pci_iomap(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
void __iomem **tbl;
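Drivers that used the removed devm_request_and_ioremap() switch to devm_ioremap_resource(), which returns an ERR_PTR() so the real error can be propagated. A minimal probe-path sketch (function name illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* -EINVAL, -EBUSY or -ENOMEM, see above */

        /* ... use base ... */
        return 0;
}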
diff --git a/lib/digsig.c b/lib/digsig.c
index 8793aeda30ca..ae05ea393fc8 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -175,10 +175,11 @@ err1:
* digsig_verify() - digital signature verification with public key
* @keyring: keyring to search key in
* @sig: digital signature
- * @sigen: length of the signature
+ * @siglen: length of the signature
* @data: data
* @datalen: length of the data
- * @return: 0 on success, -EINVAL otherwise
+ *
+ * Returns 0 on success, -EINVAL otherwise
*
* Verifies data integrity against digital signature.
* Currently only RSA is supported.
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index f23b63f0a1c3..6745c6230db3 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -23,7 +23,7 @@ static void __dump_stack(void)
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
int was_locked;
int old;
@@ -55,7 +55,7 @@ retry:
preempt_enable();
}
#else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
__dump_stack();
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7288e38e1757..c9afbe2c445a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
char buf[PREFIX_SIZE];
res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s: %pV",
+ "%s%s %s %s%s: %pV",
dynamic_emit_prefix(descriptor, buf),
dev_driver_string(dev->dev.parent),
dev_name(dev->dev.parent),
- netdev_name(dev), &vaf);
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+ res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000000..5d30c58150ad
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/idr.c b/lib/idr.c
index 1ba4956bfbff..39158abebad1 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -18,12 +18,6 @@
* pointer or what ever, we treat it as a (void *). You can pass this
* id to a user for him to pass back at a later time. You then pass
* that id to this code and it returns your pointer.
-
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
*/
#ifndef TEST // to test in user space...
@@ -151,7 +145,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
- if (idr->hint && idr->hint == p)
+ if (idr->hint == p)
RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
@@ -196,7 +190,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -207,7 +201,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
return 1;
}
-EXPORT_SYMBOL(__idr_pre_get);
/**
* sub_alloc - try to allocate an id without growing the tree depth
@@ -250,7 +243,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (id >= 1 << (idp->layers * IDR_BITS)) {
+ if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
@@ -374,20 +367,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- int rv;
-
- rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
- if (rv < 0)
- return rv == -ENOMEM ? -EAGAIN : rv;
-
- idr_fill_slot(idp, ptr, rv, pa);
- *id = rv;
- return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
/**
* idr_preload - preload for idr_alloc()
@@ -548,7 +527,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, p->bitmap))) {
__clear_bit(n, p->bitmap);
- rcu_assign_pointer(p->ary[n], NULL);
+ RCU_INIT_POINTER(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
@@ -577,6 +556,11 @@ void idr_remove(struct idr *idp, int id)
if (id < 0)
return;
+ if (id > idr_max(idp->layers)) {
+ idr_remove_warning(id);
+ return;
+ }
+
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) {
@@ -594,20 +578,10 @@ void idr_remove(struct idr *idp, int id)
bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
free_layer(idp, to_free);
}
- while (idp->id_free_cnt >= MAX_IDR_FREE) {
- p = get_from_free_list(idp);
- /*
- * Note: we don't call the rcu callback here, since the only
- * layers that fall into the freelist are those that have been
- * preallocated.
- */
- kmem_cache_free(idr_layer_cache, p);
- }
- return;
}
EXPORT_SYMBOL(idr_remove);
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
@@ -617,7 +591,7 @@ void __idr_remove_all(struct idr *idp)
n = idp->layers * IDR_BITS;
p = idp->top;
- rcu_assign_pointer(idp->top, NULL);
+ RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
@@ -640,7 +614,6 @@ void __idr_remove_all(struct idr *idp)
}
idp->layers = 0;
}
-EXPORT_SYMBOL(__idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
@@ -825,14 +798,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
p = idp->top;
if (!p)
- return ERR_PTR(-EINVAL);
-
- n = (p->layer+1) * IDR_BITS;
+ return ERR_PTR(-ENOENT);
- if (id >= (1 << n))
- return ERR_PTR(-EINVAL);
+ if (id > idr_max(p->layer + 1))
+ return ERR_PTR(-ENOENT);
- n -= IDR_BITS;
+ n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
@@ -1043,6 +1014,9 @@ void ida_remove(struct ida *ida, int id)
int n;
struct ida_bitmap *bitmap;
+ if (idr_id > idr_max(ida->idr.layers))
+ goto err;
+
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
@@ -1058,7 +1032,7 @@ void ida_remove(struct ida *ida, int id)
__clear_bit(n, p->bitmap);
bitmap = (void *)p->ary[n];
- if (!test_bit(offset, bitmap->bitmap))
+ if (!bitmap || !test_bit(offset, bitmap->bitmap))
goto err;
/* update bitmap and remove it if empty */
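With __idr_pre_get()/__idr_get_new_above() gone from the public interface, callers use the idr_preload()/idr_alloc() path. A minimal sketch; the idr instance and object are illustrative, and callers still serialize idr_alloc() with their own lock:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_store(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);
        id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);    /* any id >= 1 */
        idr_preload_end();

        return id;      /* the new id, or a negative errno */
}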
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index e6eb406f2d65..f367f9ad544c 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -1,6 +1,7 @@
#include <linux/init.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
+#include <linux/module.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
@@ -8,3 +9,8 @@
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
unsigned long, __subtree_last,
START, LAST,, interval_tree)
+
+EXPORT_SYMBOL_GPL(interval_tree_insert);
+EXPORT_SYMBOL_GPL(interval_tree_remove);
+EXPORT_SYMBOL_GPL(interval_tree_iter_first);
+EXPORT_SYMBOL_GPL(interval_tree_iter_next);
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test.c
index 245900b98c8e..245900b98c8e 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test.c
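The interval tree helpers are now exported for modules such as the relocated test above. A minimal usage sketch, assuming a caller-owned rb_root and caller-embedded nodes; [start, last] are inclusive endpoints:

#include <linux/interval_tree.h>
#include <linux/printk.h>

static struct rb_root example_root = RB_ROOT;

static void example_insert(struct interval_tree_node *node,
                           unsigned long start, unsigned long last)
{
        node->start = start;
        node->last  = last;
        interval_tree_insert(node, &example_root);
}

static void example_walk_overlaps(unsigned long start, unsigned long last)
{
        struct interval_tree_node *it;

        for (it = interval_tree_iter_first(&example_root, start, last); it;
             it = interval_tree_iter_next(it, start, last))
                pr_info("overlaps [%lu, %lu]\n", it->start, it->last);
}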
diff --git a/lib/iomap.c b/lib/iomap.c
index 2c08f36862eb..fc3dcb4b238e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -239,7 +239,7 @@ void ioport_unmap(void __iomem *addr)
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/* Hide the details if this is a MMIO or PIO address space and just do what
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf27..df3abd1eaa4a 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,62 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
return 0;
}
EXPORT_SYMBOL(memcpy_toiovec);
+
+/*
+ * Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+ int offset, int len)
+{
+ int copy;
+ for (; len > 0; ++iov) {
+ /* Skip over the finished iovecs */
+ if (unlikely(offset >= iov->iov_len)) {
+ offset -= iov->iov_len;
+ continue;
+ }
+ copy = min_t(unsigned int, iov->iov_len - offset, len);
+ if (copy_to_user(iov->iov_base + offset, kdata, copy))
+ return -EFAULT;
+ offset = 0;
+ kdata += copy;
+ len -= copy;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovecend);
+
+/*
+ * Copy iovec to kernel. Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+ int offset, int len)
+{
+ /* No data? Done! */
+ if (len == 0)
+ return 0;
+
+ /* Skip over the finished iovecs */
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ iov++;
+ }
+
+ while (len > 0) {
+ u8 __user *base = iov->iov_base + offset;
+ int copy = min_t(unsigned int, len, iov->iov_len - offset);
+
+ offset = 0;
+ if (copy_from_user(kdata, base, copy))
+ return -EFAULT;
+ len -= copy;
+ kdata += copy;
+ iov++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovecend);
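The *_end variants take an explicit offset so a caller can lay out several kernel buffers in a user iovec without advancing the iovec itself. A minimal sketch with illustrative names:

#include <linux/socket.h>
#include <linux/uio.h>

static int example_fill_iovec(const struct iovec *iov,
                              u8 *hdr, int hdr_len,
                              u8 *data, int data_len)
{
        int err;

        /* header at offset 0 of the user buffers ... */
        err = memcpy_toiovecend(iov, hdr, 0, hdr_len);
        if (err)
                return err;     /* -EFAULT */

        /* ... payload right after it; iov is never modified */
        return memcpy_toiovecend(iov, data, hdr_len, data_len);
}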
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5f72767ddd9b..9ebf9e20de53 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,7 +29,9 @@
u64 uevent_seqnum;
+#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+#endif
#ifdef CONFIG_NET
struct uevent_sock {
struct list_head list;
@@ -109,6 +111,7 @@ static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
}
#endif
+#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
const struct kobj_ns_type_operations *ops;
@@ -124,6 +127,31 @@ static int kobj_usermode_filter(struct kobject *kobj)
return 0;
}
+static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+{
+ int len;
+
+ len = strlcpy(&env->buf[env->buflen], subsystem,
+ sizeof(env->buf) - env->buflen);
+ if (len >= (sizeof(env->buf) - env->buflen)) {
+ WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ return -ENOMEM;
+ }
+
+ env->argv[0] = uevent_helper;
+ env->argv[1] = &env->buf[env->buflen];
+ env->argv[2] = NULL;
+
+ env->buflen += len + 1;
+ return 0;
+}
+
+static void cleanup_uevent_env(struct subprocess_info *info)
+{
+ kfree(info->data);
+}
+#endif
+
/**
* kobject_uevent_env - send an uevent with environmental data
*
@@ -299,13 +327,11 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
#endif
mutex_unlock(&uevent_sock_mutex);
+#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
- char *argv [3];
+ struct subprocess_info *info;
- argv [0] = uevent_helper;
- argv [1] = (char *)subsystem;
- argv [2] = NULL;
retval = add_uevent_var(env, "HOME=/");
if (retval)
goto exit;
@@ -313,10 +339,20 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
if (retval)
goto exit;
+ retval = init_uevent_argv(env, subsystem);
+ if (retval)
+ goto exit;
- retval = call_usermodehelper(argv[0], argv,
- env->envp, UMH_WAIT_EXEC);
+ retval = -ENOMEM;
+ info = call_usermodehelper_setup(env->argv[0], env->argv,
+ env->envp, GFP_KERNEL,
+ NULL, cleanup_uevent_env, env);
+ if (info) {
+ retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
+ env = NULL; /* freed by cleanup_uevent_env */
+ }
}
+#endif
exit:
kfree(devpath);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 244f5480c898..b3131f5cf8a2 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -62,10 +62,7 @@ EXPORT_SYMBOL(crc32c);
static int __init libcrc32c_mod_init(void)
{
tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tfm);
}
static void __exit libcrc32c_mod_fini(void)
diff --git a/lib/lockref.c b/lib/lockref.c
index f07a40d33871..d2233de9a86e 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,5 @@
#include <linux/export.h>
#include <linux/lockref.h>
-#include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF
@@ -29,7 +28,7 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- arch_mutex_cpu_relax(); \
+ cpu_relax_lowlatency(); \
} \
} while (0)
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..7a85967060a5 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
+ if (unlikely(length > (size_t)(length + len)))
+ goto _output_error;
length += len;
}
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
+ if (unlikely(length > (size_t)(length + *ip)))
+ goto _output_error;
length += *ip++;
}
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
/* write overflow error detected */
_output_error:
- return (int) (-(((char *)ip) - source));
+ return -1;
}
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
}
}
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
if (length == ML_MASK) {
while (ip < iend) {
int s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
if (s == 255)
continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
/* write overflow error detected */
_output_error:
- return (int) (-(((char *) ip) - source));
+ return -1;
}
int lz4_decompress(const unsigned char *src, size_t *src_len,
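The added length > (size_t)(length + len) tests catch unsigned wraparound in the run-length accumulation before the value is used. The idiom, isolated as an illustrative helper:

#include <linux/types.h>

/*
 * size_t arithmetic wraps modulo 2^BITS_PER_LONG, so if length + add
 * overflows, the sum becomes smaller than length.  That is exactly what
 * the new checks test for before trusting the accumulator.
 */
static inline bool example_would_overflow(size_t length, size_t add)
{
        return length > (size_t)(length + add);
}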
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..8563081e8da3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x) \
+ (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x) \
+ (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x) \
+ do { \
+ if (!HAVE_IP(t, x)) \
+ goto input_overrun; \
+ } while (0)
+
+#define NEED_OP(t, x) \
+ do { \
+ if (!HAVE_OP(t, x)) \
+ goto output_overrun; \
+ } while (0)
+
+#define TEST_LB(m_pos) \
+ do { \
+ if ((m_pos) < out) \
+ goto lookbehind_overrun; \
+ } while (0)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -81,8 +101,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t);
- NEED_IP(t + 3);
+ NEED_OP(t, 0);
+ NEED_IP(t, 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2);
+ NEED_OP(2, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 31 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 7 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t + 15))) {
+ if (likely(HAVE_OP(t, 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6)) {
+ if (HAVE_IP(6, 0)) {
state = next;
COPY4(op, ip);
op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t);
+ NEED_OP(t, 0);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t);
+ NEED_OP(t, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -194,15 +214,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t + 3);
- NEED_OP(t);
+ NEED_IP(t, 3);
+ NEED_OP(t, 0);
while (t > 0) {
*op++ = *ip++;
t--;
diff --git a/lib/net_utils.c b/lib/net_utils.c
index 2e3c52c8d050..148fc6e99ef6 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -3,24 +3,24 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
{
int i;
/* XX:XX:XX:XX:XX:XX */
if (strlen(s) < 3 * ETH_ALEN - 1)
- return 0;
+ return false;
/* Don't dirty result unless string is valid MAC. */
for (i = 0; i < ETH_ALEN; i++) {
if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
- return 0;
+ return false;
if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
- return 0;
+ return false;
}
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
}
- return 1;
+ return true;
}
EXPORT_SYMBOL(mac_pton);
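A minimal sketch of parsing a user-supplied string with the now bool-returning mac_pton(); the parameter and storage names are illustrative:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>

static u8 example_mac[ETH_ALEN];

static int example_parse_mac(const char *str)
{
        if (!mac_pton(str, example_mac))
                return -EINVAL; /* not a valid XX:XX:XX:XX:XX:XX string */

        return 0;
}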
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca7809b08..9c3e85ff0a6c 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -136,6 +136,7 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
errout:
return err;
}
+EXPORT_SYMBOL(nla_validate);
/**
* nla_policy_len - Determin the max. length of a policy
@@ -162,6 +163,7 @@ nla_policy_len(const struct nla_policy *p, int n)
return len;
}
+EXPORT_SYMBOL(nla_policy_len);
/**
* nla_parse - Parse a stream of attributes into a tb buffer
@@ -201,13 +203,14 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
}
if (unlikely(rem > 0))
- printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
- "attributes.\n", rem);
+ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+ rem, current->comm);
err = 0;
errout:
return err;
}
+EXPORT_SYMBOL(nla_parse);
/**
* nla_find - Find a specific attribute in a stream of attributes
@@ -228,6 +231,7 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
return NULL;
}
+EXPORT_SYMBOL(nla_find);
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
@@ -258,6 +262,7 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
return srclen;
}
+EXPORT_SYMBOL(nla_strlcpy);
/**
* nla_memcpy - Copy a netlink attribute into another memory area
@@ -278,6 +283,7 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
return minlen;
}
+EXPORT_SYMBOL(nla_memcpy);
/**
* nla_memcmp - Compare an attribute with sized memory area
@@ -295,6 +301,7 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
return d;
}
+EXPORT_SYMBOL(nla_memcmp);
/**
* nla_strcmp - Compare a string attribute against a string
@@ -303,14 +310,21 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
- int len = strlen(str) + 1;
- int d = nla_len(nla) - len;
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+ if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
+EXPORT_SYMBOL(nla_strcmp);
#ifdef CONFIG_NET
/**
@@ -496,12 +510,3 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
}
EXPORT_SYMBOL(nla_append);
#endif
-
-EXPORT_SYMBOL(nla_validate);
-EXPORT_SYMBOL(nla_policy_len);
-EXPORT_SYMBOL(nla_parse);
-EXPORT_SYMBOL(nla_find);
-EXPORT_SYMBOL(nla_strlcpy);
-EXPORT_SYMBOL(nla_memcpy);
-EXPORT_SYMBOL(nla_memcmp);
-EXPORT_SYMBOL(nla_strcmp);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b7034a51b..fe5a3342e960 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
#define PCPU_COUNT_BIAS (1U << 31)
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+ return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
- ref->pcpu_count = alloc_percpu(unsigned);
- if (!ref->pcpu_count)
+ ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+ if (!ref->pcpu_count_ptr)
return -ENOMEM;
ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
*
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init(). @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
*
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return. To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
*/
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = ref->pcpu_count;
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
int cpu;
- WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+ BUG_ON(!pcpu_count);
+ WARN_ON(!percpu_ref_is_zero(ref));
+
+ atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+ /*
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following PCPU_REF_DEAD clearing.
+ */
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(pcpu_count, cpu) = 0;
+
+ smp_store_release(&ref->pcpu_count_ptr,
+ ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
if (pcpu_count) {
- for_each_possible_cpu(cpu)
- WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
- free_percpu(ref->pcpu_count);
+ free_percpu(pcpu_count);
+ ref->pcpu_count_ptr = PCPU_REF_DEAD;
}
}
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = ref->pcpu_count;
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
unsigned count = 0;
int cpu;
- /* Mask out PCPU_REF_DEAD */
- pcpu_count = (unsigned __percpu *)
- (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(pcpu_count, cpu);
- free_percpu(pcpu_count);
-
pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
/*
@@ -152,11 +175,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+ WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
"percpu_ref_kill() called more than once!\n");
- ref->pcpu_count = (unsigned __percpu *)
- (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+ ref->pcpu_count_ptr |= PCPU_REF_DEAD;
ref->confirm_kill = confirm_kill;
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
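
The percpu_ref API now splits teardown into percpu_ref_kill()/percpu_ref_exit(), with percpu_ref_reinit() available for reuse once the count has reached zero. A minimal lifecycle sketch, assuming an object that embeds a percpu_ref (struct my_obj and its helpers are illustrative names):

#include <linux/percpu-refcount.h>

struct my_obj {
	struct percpu_ref ref;
	/* ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
	/* called once the ref has been killed and has dropped to zero */
}

static int my_obj_setup(struct my_obj *obj)
{
	int err;

	err = percpu_ref_init(&obj->ref, my_obj_release);
	if (err)
		return err;

	/* If a later init step fails, percpu_ref_exit(&obj->ref) now
	 * replaces the removed percpu_ref_cancel_init() for immediate,
	 * release-free cleanup.
	 */
	return 0;
}

static void my_obj_teardown(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);
	/* ... wait (object specific) until my_obj_release() has run;
	 * percpu_ref_reinit(&obj->ref) could instead restore the ref to
	 * its freshly initialized state for reuse ...
	 */
	percpu_ref_exit(&obj->ref);	/* frees the percpu counters */
}
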
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8280a5dd1727..7dd33577b905 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
struct percpu_counter *fbc;
compute_batch_value();
- if (action != CPU_DEAD)
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f7a46f..d408e774b746 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
plist_check_head(head);
}
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add(). It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node: &struct plist_node pointer - entry to be moved
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ BUG_ON(plist_head_empty(head));
+ BUG_ON(plist_node_empty(node));
+
+ if (node == plist_last(head))
+ return;
+
+ iter = plist_next(node);
+
+ if (node->prio != iter->prio)
+ return;
+
+ plist_del(node, head);
+
+ plist_for_each_continue(iter, head) {
+ if (node->prio != iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+ }
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
+
#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
@@ -170,12 +210,20 @@ static void __init plist_test_check(int nr_expect)
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
+static void __init plist_test_requeue(struct plist_node *node)
+{
+ plist_requeue(node, &test_head);
+
+ if (node != plist_last(&test_head))
+ BUG_ON(node->prio == plist_next(node)->prio);
+}
+
static int __init plist_test(void)
{
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
- pr_debug("start plist test\n");
+ printk(KERN_DEBUG "start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
@@ -193,6 +241,10 @@ static int __init plist_test(void)
nr_expect--;
}
plist_test_check(nr_expect);
+ if (!plist_node_empty(test_node + i)) {
+ plist_test_requeue(test_node + i);
+ plist_test_check(nr_expect);
+ }
}
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
@@ -203,7 +255,7 @@ static int __init plist_test(void)
plist_test_check(nr_expect);
}
- pr_debug("end plist test\n");
+ printk(KERN_DEBUG "end plist test\n");
return 0;
}
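
A minimal sketch of what plist_requeue() is for: rotating the head of a run of equal-priority nodes so they are serviced round-robin. The helper name is illustrative and the caller is assumed to hold whatever lock protects the list:

#include <linux/plist.h>

static struct plist_node *pick_and_rotate(struct plist_head *head)
{
	struct plist_node *node;

	if (plist_head_empty(head))
		return NULL;

	node = plist_first(head);
	/* cheaper than plist_del() followed by plist_add() */
	plist_requeue(node, head);
	return node;
}
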
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd4a8dfdf0b8..3291a8e37490 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -27,6 +27,7 @@
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
@@ -35,33 +36,6 @@
#include <linux/hardirq.h> /* in_interrupt() */
-#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
-#else
-#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
-#endif
-
-#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
-#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
-
-#define RADIX_TREE_TAG_LONGS \
- ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-struct radix_tree_node {
- unsigned int height; /* Height from the bottom */
- unsigned int count;
- union {
- struct radix_tree_node *parent; /* Used when ascending tree */
- struct rcu_head rcu_head; /* Used when freeing node */
- };
- void __rcu *slots[RADIX_TREE_MAP_SIZE];
- unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
-#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-
/*
* The height_to_maxindex array needs to be one deeper than the maximum
* path as height 0 holds only 1 entry.
@@ -221,12 +195,17 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(ret);
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -277,14 +256,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
int ret = -ENOMEM;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < ARRAY_SIZE(rtp->nodes))
rtp->nodes[rtp->nr++] = node;
else
@@ -369,7 +348,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Increase the height. */
newheight = root->height+1;
- node->height = newheight;
+ BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK);
+ node->path = newheight;
node->count = 1;
node->parent = NULL;
slot = root->rnode;
@@ -387,23 +367,28 @@ out:
}
/**
- * radix_tree_insert - insert into a radix tree
+ * __radix_tree_create - create a slot in a radix tree
* @root: radix tree root
* @index: index key
- * @item: item to insert
+ * @nodep: returns node
+ * @slotp: returns slot
*
- * Insert an item into the radix tree at position @index.
+ * Create, if necessary, and return the node and slot for an item
+ * at position @index in the radix tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->rnode is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
+ *
+ * Returns -ENOMEM, or 0 for success.
*/
-int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *item)
+int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp)
{
struct radix_tree_node *node = NULL, *slot;
- unsigned int height, shift;
- int offset;
+ unsigned int height, shift, offset;
int error;
- BUG_ON(radix_tree_is_indirect_ptr(item));
-
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
@@ -422,11 +407,12 @@ int radix_tree_insert(struct radix_tree_root *root,
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
return -ENOMEM;
- slot->height = height;
+ slot->path = height;
slot->parent = node;
if (node) {
rcu_assign_pointer(node->slots[offset], slot);
node->count++;
+ slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT;
} else
rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
}
@@ -439,16 +425,42 @@ int radix_tree_insert(struct radix_tree_root *root,
height--;
}
- if (slot != NULL)
+ if (nodep)
+ *nodep = node;
+ if (slotp)
+ *slotp = node ? node->slots + offset : (void **)&root->rnode;
+ return 0;
+}
+
+/**
+ * radix_tree_insert - insert into a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: item to insert
+ *
+ * Insert an item into the radix tree at position @index.
+ */
+int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *item)
+{
+ struct radix_tree_node *node;
+ void **slot;
+ int error;
+
+ BUG_ON(radix_tree_is_indirect_ptr(item));
+
+ error = __radix_tree_create(root, index, &node, &slot);
+ if (error)
+ return error;
+ if (*slot != NULL)
return -EEXIST;
+ rcu_assign_pointer(*slot, item);
if (node) {
node->count++;
- rcu_assign_pointer(node->slots[offset], item);
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK));
+ BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK));
} else {
- rcu_assign_pointer(root->rnode, item);
BUG_ON(root_tag_get(root, 0));
BUG_ON(root_tag_get(root, 1));
}
@@ -457,15 +469,26 @@ int radix_tree_insert(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_insert);
-/*
- * is_slot == 1 : search for the slot.
- * is_slot == 0 : search for the node.
+/**
+ * __radix_tree_lookup - lookup an item in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @nodep: returns node
+ * @slotp: returns slot
+ *
+ * Lookup and return the item at position @index in the radix
+ * tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->rnode is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
*/
-static void *radix_tree_lookup_element(struct radix_tree_root *root,
- unsigned long index, int is_slot)
+void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp)
{
+ struct radix_tree_node *node, *parent;
unsigned int height, shift;
- struct radix_tree_node *node, **slot;
+ void **slot;
node = rcu_dereference_raw(root->rnode);
if (node == NULL)
@@ -474,19 +497,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
if (!radix_tree_is_indirect_ptr(node)) {
if (index > 0)
return NULL;
- return is_slot ? (void *)&root->rnode : node;
+
+ if (nodep)
+ *nodep = NULL;
+ if (slotp)
+ *slotp = (void **)&root->rnode;
+ return node;
}
node = indirect_to_ptr(node);
- height = node->height;
+ height = node->path & RADIX_TREE_HEIGHT_MASK;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
do {
- slot = (struct radix_tree_node **)
- (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+ parent = node;
+ slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK);
node = rcu_dereference_raw(*slot);
if (node == NULL)
return NULL;
@@ -495,7 +523,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
height--;
} while (height > 0);
- return is_slot ? (void *)slot : indirect_to_ptr(node);
+ if (nodep)
+ *nodep = parent;
+ if (slotp)
+ *slotp = slot;
+ return node;
}
/**
@@ -513,7 +545,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
- return (void **)radix_tree_lookup_element(root, index, 1);
+ void **slot;
+
+ if (!__radix_tree_lookup(root, index, NULL, &slot))
+ return NULL;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
@@ -531,7 +567,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
*/
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
- return radix_tree_lookup_element(root, index, 0);
+ return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
@@ -676,7 +712,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
return (index == 0);
node = indirect_to_ptr(node);
- height = node->height;
+ height = node->path & RADIX_TREE_HEIGHT_MASK;
if (index > radix_tree_maxindex(height))
return 0;
@@ -713,7 +749,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
{
unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *rnode, *node;
- unsigned long index, offset;
+ unsigned long index, offset, height;
if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
return NULL;
@@ -744,7 +780,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
return NULL;
restart:
- shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT;
+ height = rnode->path & RADIX_TREE_HEIGHT_MASK;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
offset = index >> shift;
/* Index outside of the tree */
@@ -946,81 +983,6 @@ next:
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
-
-/**
- * radix_tree_next_hole - find the next hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
- * indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true). In rare cases of index wrap-around, 0 will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 5, then subsequently a hole is created at index 10,
- * radix_tree_next_hole covering both indexes may return 10 if called
- * under rcu_read_lock.
- */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index++;
- if (index == 0)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_next_hole);
-
-/**
- * radix_tree_prev_hole - find the prev hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search backwards in the range [max(index-max_scan+1, 0), index]
- * for the first hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >= max_scan'
- * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 10, then subsequently a hole is created at index 5,
- * radix_tree_prev_hole covering both indexes may return 5 if called under
- * rcu_read_lock.
- */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index--;
- if (index == ULONG_MAX)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_prev_hole);
-
/**
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
* @root: radix tree root
@@ -1189,7 +1151,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
unsigned int shift, height;
unsigned long i;
- height = slot->height;
+ height = slot->path & RADIX_TREE_HEIGHT_MASK;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
for ( ; height > 1; height--) {
@@ -1252,7 +1214,8 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
}
node = indirect_to_ptr(node);
- max_index = radix_tree_maxindex(node->height);
+ max_index = radix_tree_maxindex(node->path &
+ RADIX_TREE_HEIGHT_MASK);
if (cur_index > max_index) {
rcu_read_unlock();
break;
@@ -1337,48 +1300,89 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
}
/**
- * radix_tree_delete - delete an item from a radix tree
+ * __radix_tree_delete_node - try to free node after clearing a slot
+ * @root: radix tree root
+ * @node: node containing @index
+ *
+ * After clearing the slot at @index in @node from radix tree
+ * rooted at @root, call this function to attempt freeing the
+ * node and shrinking the tree.
+ *
+ * Returns %true if @node was freed, %false otherwise.
+ */
+bool __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node)
+{
+ bool deleted = false;
+
+ do {
+ struct radix_tree_node *parent;
+
+ if (node->count) {
+ if (node == indirect_to_ptr(root->rnode)) {
+ radix_tree_shrink(root);
+ if (root->height == 0)
+ deleted = true;
+ }
+ return deleted;
+ }
+
+ parent = node->parent;
+ if (parent) {
+ unsigned int offset;
+
+ offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;
+ parent->slots[offset] = NULL;
+ parent->count--;
+ } else {
+ root_tag_clear_all(root);
+ root->height = 0;
+ root->rnode = NULL;
+ }
+
+ radix_tree_node_free(node);
+ deleted = true;
+
+ node = parent;
+ } while (node);
+
+ return deleted;
+}
+
+/**
+ * radix_tree_delete_item - delete an item from a radix tree
* @root: radix tree root
* @index: index key
+ * @item: expected item
*
- * Remove the item at @index from the radix tree rooted at @root.
+ * Remove @item at @index from the radix tree rooted at @root.
*
- * Returns the address of the deleted item, or NULL if it was not present.
+ * Returns the address of the deleted item, or NULL if it was not present
+ * or the entry at the given @index was not @item.
*/
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_delete_item(struct radix_tree_root *root,
+ unsigned long index, void *item)
{
- struct radix_tree_node *node = NULL;
- struct radix_tree_node *slot = NULL;
- struct radix_tree_node *to_free;
- unsigned int height, shift;
+ struct radix_tree_node *node;
+ unsigned int offset;
+ void **slot;
+ void *entry;
int tag;
- int uninitialized_var(offset);
- height = root->height;
- if (index > radix_tree_maxindex(height))
- goto out;
+ entry = __radix_tree_lookup(root, index, &node, &slot);
+ if (!entry)
+ return NULL;
- slot = root->rnode;
- if (height == 0) {
+ if (item && entry != item)
+ return NULL;
+
+ if (!node) {
root_tag_clear_all(root);
root->rnode = NULL;
- goto out;
+ return entry;
}
- slot = indirect_to_ptr(slot);
- shift = height * RADIX_TREE_MAP_SHIFT;
- do {
- if (slot == NULL)
- goto out;
-
- shift -= RADIX_TREE_MAP_SHIFT;
- offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- node = slot;
- slot = slot->slots[offset];
- } while (shift);
-
- if (slot == NULL)
- goto out;
+ offset = index & RADIX_TREE_MAP_MASK;
/*
* Clear all tags associated with the item to be deleted.
@@ -1389,40 +1393,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
radix_tree_tag_clear(root, index, tag);
}
- to_free = NULL;
- /* Now free the nodes we do not need anymore */
- while (node) {
- node->slots[offset] = NULL;
- node->count--;
- /*
- * Queue the node for deferred freeing after the
- * last reference to it disappears (set NULL, above).
- */
- if (to_free)
- radix_tree_node_free(to_free);
-
- if (node->count) {
- if (node == indirect_to_ptr(root->rnode))
- radix_tree_shrink(root);
- goto out;
- }
-
- /* Node with zero slots in use so free it */
- to_free = node;
+ node->slots[offset] = NULL;
+ node->count--;
- index >>= RADIX_TREE_MAP_SHIFT;
- offset = index & RADIX_TREE_MAP_MASK;
- node = node->parent;
- }
+ __radix_tree_delete_node(root, node);
- root_tag_clear_all(root);
- root->height = 0;
- root->rnode = NULL;
- if (to_free)
- radix_tree_node_free(to_free);
+ return entry;
+}
+EXPORT_SYMBOL(radix_tree_delete_item);
-out:
- return slot;
+/**
+ * radix_tree_delete - delete an item from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Remove the item at @index from the radix tree rooted at @root.
+ *
+ * Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+ return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -1438,9 +1429,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
EXPORT_SYMBOL(radix_tree_tagged);
static void
-radix_tree_node_ctor(void *node)
+radix_tree_node_ctor(void *arg)
{
- memset(node, 0, sizeof(struct radix_tree_node));
+ struct radix_tree_node *node = arg;
+
+ memset(node, 0, sizeof(*node));
+ INIT_LIST_HEAD(&node->private_list);
}
static __init unsigned long __maxindex(unsigned int height)
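
Two small sketches of the new helpers: a conditional delete built on radix_tree_delete_item(), and an in-place replacement via the slot returned by __radix_tree_lookup(). The wrapper names are illustrative and tree locking is assumed to be handled by the caller:

#include <linux/radix-tree.h>

/* Remove the entry at @index only if it is still @expected; returns true
 * if this caller performed the removal.
 */
static bool remove_if_unchanged(struct radix_tree_root *tree,
				unsigned long index, void *expected)
{
	return radix_tree_delete_item(tree, index, expected) == expected;
}

/* Replace a present entry in place; @new_entry must be non-NULL so the
 * node's slot count stays correct.
 */
static void replace_if_present(struct radix_tree_root *tree,
			       unsigned long index, void *new_entry)
{
	void **slot;

	if (__radix_tree_lookup(tree, index, NULL, &slot))
		radix_tree_replace_slot(slot, new_entry);
}
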
diff --git a/lib/random32.c b/lib/random32.c
index 614896778700..c9b6bf3afe0c 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -1,37 +1,35 @@
/*
- This is a maximally equidistributed combined Tausworthe generator
- based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
- lfsr113 version:
-
- x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
-
- s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
- s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
- s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
- s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
-
- The period of this generator is about 2^113 (see erratum paper).
-
- From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
- Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
- ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
-
- There is an erratum in the paper "Tables of Maximally
- Equidistributed Combined LFSR Generators", Mathematics of
- Computation, 68, 225 (1999), 261--269:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
-
- ... the k_j most significant bits of z_j must be non-
- zero, for each j. (Note: this restriction also applies to the
- computer code given in [4], but was mistakenly not mentioned in
- that paper.)
-
- This affects the seeding procedure by imposing the requirement
- s1 > 1, s2 > 7, s3 > 15, s4 > 127.
-
-*/
+ * This is a maximally equidistributed combined Tausworthe generator
+ * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+ *
+ * lfsr113 version:
+ *
+ * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
+ *
+ * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
+ * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
+ * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
+ * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
+ *
+ * The period of this generator is about 2^113 (see erratum paper).
+ *
+ * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
+ * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+ *
+ * There is an erratum in the paper "Tables of Maximally Equidistributed
+ * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
+ * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+ *
+ * ... the k_j most significant bits of z_j must be non-zero,
+ * for each j. (Note: this restriction also applies to the
+ * computer code given in [4], but was mistakenly not mentioned
+ * in that paper.)
+ *
+ * This affects the seeding procedure by imposing the requirement
+ * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
+ */
#include <linux/types.h>
#include <linux/percpu.h>
@@ -42,6 +40,10 @@
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
#endif
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -55,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
*/
u32 prandom_u32_state(struct rnd_state *state)
{
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -75,15 +76,17 @@ EXPORT_SYMBOL(prandom_u32_state);
*/
u32 prandom_u32(void)
{
- unsigned long r;
struct rnd_state *state = &get_cpu_var(net_rand_state);
- r = prandom_u32_state(state);
+ u32 res;
+
+ res = prandom_u32_state(state);
put_cpu_var(state);
- return r;
+
+ return res;
}
EXPORT_SYMBOL(prandom_u32);
-/*
+/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+static u32 __extract_hwseed(void)
{
- /* Note: This sort of seeding is ONLY used in test cases and
- * during boot at the time from core_initcall until late_initcall
- * as we don't have a stronger entropy source available yet.
- * After late_initcall, we reseed entire state, we have to (!),
- * otherwise an attacker just needs to search 32 bit space to
- * probe for our internal 128 bit state if he knows a couple
- * of prandom32 outputs!
- */
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
- state->s1 = __seed(LCG(seed), 2U);
- state->s2 = __seed(LCG(state->s1), 8U);
- state->s3 = __seed(LCG(state->s2), 16U);
- state->s4 = __seed(LCG(state->s3), 128U);
+ u32 val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
/**
@@ -194,21 +201,22 @@ static int __init prandom_init(void)
{
int i;
-#ifdef CONFIG_RANDOM32_SELFTEST
prandom_state_selftest();
-#endif
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
+ u32 weak_seed = (i + jiffies) ^ random_get_entropy();
- prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+ prandom_seed_early(state, weak_seed, true);
prandom_warmup(state);
}
+
return 0;
}
core_initcall(prandom_init);
static void __prandom_timer(unsigned long dontcare);
+
static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
static void __prandom_timer(unsigned long dontcare)
@@ -259,6 +267,7 @@ static void __prandom_reseed(bool late)
if (latch && !late)
goto out;
+
latch = true;
for_each_possible_cpu(i) {
@@ -417,7 +426,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test1[i].seed);
+ prandom_seed_early(&state, test1[i].seed, false);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -432,7 +441,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test2[i].seed);
+ prandom_seed_early(&state, test2[i].seed, false);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
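
For reference, a standalone userspace sketch of the lfsr113 step and the LCG-based early seeding used above (hardware seed mixing via arch_get_random_seed_int()/arch_get_random_int() is omitted here):

#include <stdint.h>
#include <stdio.h>

struct rnd_state { uint32_t s1, s2, s3, s4; };

#define TAUSWORTHE(s, a, b, c, d) (((s & c) << d) ^ (((s << a) ^ s) >> b))
#define LCG(x) ((x) * 69069U)	/* same "super-duper" LCG as above */

/* enforce s1 > 1, s2 > 7, s3 > 15, s4 > 127 as required by the erratum */
static uint32_t __seed(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

static uint32_t prandom_u32_state(struct rnd_state *st)
{
	st->s1 = TAUSWORTHE(st->s1, 6U, 13U, 4294967294U, 18U);
	st->s2 = TAUSWORTHE(st->s2, 2U, 27U, 4294967288U, 2U);
	st->s3 = TAUSWORTHE(st->s3, 13U, 21U, 4294967280U, 7U);
	st->s4 = TAUSWORTHE(st->s4, 3U, 12U, 4294967168U, 13U);
	return st->s1 ^ st->s2 ^ st->s3 ^ st->s4;
}

static void prandom_seed_early(struct rnd_state *st, uint32_t seed)
{
	st->s1 = __seed(LCG(seed), 2U);
	st->s2 = __seed(LCG(st->s1), 8U);
	st->s3 = __seed(LCG(st->s2), 16U);
	st->s4 = __seed(LCG(st->s3), 128U);
}

int main(void)
{
	struct rnd_state st;
	int i;

	prandom_seed_early(&st, 1);
	for (i = 0; i < 10; i++)	/* ten warm-up rounds, as prandom_warmup() does */
		prandom_u32_state(&st);
	printf("%u\n", prandom_u32_state(&st));
	return 0;
}
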
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..e6940cf16628
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,797 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+#include <linux/log2.h>
+
+#define HASH_DEFAULT_SIZE 64UL
+#define HASH_MIN_SIZE 4UL
+
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+{
+ return ht->p.mutex_is_held();
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+#endif
+
+/**
+ * rht_obj - cast hash head to outer object
+ * @ht: hash table
+ * @he: hashed node
+ */
+void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
+{
+ return (void *) he - ht->p.head_offset;
+}
+EXPORT_SYMBOL_GPL(rht_obj);
+
+static u32 __hashfn(const struct rhashtable *ht, const void *key,
+ u32 len, u32 hsize)
+{
+ u32 h;
+
+ h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+}
+
+/**
+ * rhashtable_hashfn - compute hash for key of given length
+ * @ht: hash table to compute the hash for
+ * @key: pointer to key
+ * @len: length of key
+ *
+ * Computes the hash value using the hash function provided in the 'hashfn'
+ * of struct rhashtable_params. The returned value is guaranteed to be
+ * smaller than the number of buckets in the hash table.
+ */
+u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return __hashfn(ht, key, len, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_hashfn);
+
+static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+{
+ if (unlikely(!ht->p.key_len)) {
+ u32 h;
+
+ h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+ }
+
+ return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+}
+
+/**
+ * rhashtable_obj_hashfn - compute hash for hashed object
+ * @ht: hash table to compute the hash for
+ * @ptr: pointer to hashed object
+ *
+ * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
+ * on whether the hash table is set up to work with
+ * a fixed length key. The returned value is guaranteed to be smaller than
+ * the number of buckets in the hash table.
+ */
+u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return obj_hashfn(ht, ptr, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+
+static u32 head_hashfn(const struct rhashtable *ht,
+ const struct rhash_head *he, u32 hsize)
+{
+ return obj_hashfn(ht, rht_obj(ht, he), hsize);
+}
+
+static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
+ tbl = kzalloc(size, flags);
+ if (tbl == NULL)
+ tbl = vzalloc(size);
+
+ if (tbl == NULL)
+ return NULL;
+
+ tbl->size = nbuckets;
+
+ return tbl;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+ kvfree(tbl);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+{
+ /* Expand table when exceeding 75% load */
+ return ht->nelems > (new_size / 4 * 3);
+}
+EXPORT_SYMBOL_GPL(rht_grow_above_75);
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+{
+ /* Shrink table beneath 30% load */
+ return ht->nelems < (new_size * 3 / 10);
+}
+EXPORT_SYMBOL_GPL(rht_shrink_below_30);
+
+static void hashtable_chain_unzip(const struct rhashtable *ht,
+ const struct bucket_table *new_tbl,
+ struct bucket_table *old_tbl, size_t n)
+{
+ struct rhash_head *he, *p, *next;
+ unsigned int h;
+
+ /* Old bucket empty, no work needed. */
+ p = rht_dereference(old_tbl->buckets[n], ht);
+ if (!p)
+ return;
+
+	/* Advance the old bucket pointer one or more times until it
+	 * reaches a node that doesn't hash to the same bucket as the
+	 * previous node. Call that previous node p.
+	 */
+ h = head_hashfn(ht, p, new_tbl->size);
+ rht_for_each(he, p->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) != h)
+ break;
+ p = he;
+ }
+ RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+
+ /* Find the subsequent node which does hash to the same
+ * bucket as node P, or NULL if no such node exists.
+ */
+ next = NULL;
+ if (he) {
+ rht_for_each(he, he->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == h) {
+ next = he;
+ break;
+ }
+ }
+ }
+
+ /* Set p's next pointer to that subsequent node pointer,
+ * bypassing the nodes which do not hash to p's bucket
+ */
+ RCU_INIT_POINTER(p->next, next);
+}
+
+/**
+ * rhashtable_expand - Expand hash table while allowing concurrent lookups
+ * @ht: the hash table to expand
+ * @flags: allocation flags
+ *
+ * A secondary bucket array is allocated and the hash entries are migrated
+ * while keeping them on both lists until the end of the RCU grace period.
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head *he;
+ unsigned int i, h;
+ bool complete;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
+ return 0;
+
+ new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+ if (new_tbl == NULL)
+ return -ENOMEM;
+
+ ht->shift++;
+
+ /* For each new bucket, search the corresponding old bucket
+ * for the first entry that hashes to the new bucket, and
+ * link the new bucket to that entry. Since all the entries
+ * which will end up in the new bucket appear in the same
+ * old bucket, this constructs an entirely valid new hash
+ * table, but with multiple buckets "zipped" together into a
+ * single imprecise chain.
+ */
+ for (i = 0; i < new_tbl->size; i++) {
+ h = i & (old_tbl->size - 1);
+ rht_for_each(he, old_tbl->buckets[h], ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == i) {
+ RCU_INIT_POINTER(new_tbl->buckets[i], he);
+ break;
+ }
+ }
+ }
+
+ /* Publish the new table pointer. Lookups may now traverse
+ * the new table, but they will not benefit from any
+ * additional efficiency until later steps unzip the buckets.
+ */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+
+ /* Unzip interleaved hash chains */
+ do {
+ /* Wait for readers. All new readers will see the new
+ * table, and thus no references to the old table will
+ * remain.
+ */
+ synchronize_rcu();
+
+ /* For each bucket in the old table (each of which
+ * contains items from multiple buckets of the new
+ * table): ...
+ */
+ complete = true;
+ for (i = 0; i < old_tbl->size; i++) {
+ hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
+ if (old_tbl->buckets[i] != NULL)
+ complete = false;
+ }
+ } while (!complete);
+
+ bucket_table_free(old_tbl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_expand);
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht: the hash table to shrink
+ * @flags: allocation flags
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ unsigned int i;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (tbl->size <= HASH_MIN_SIZE)
+ return 0;
+
+ ntbl = bucket_table_alloc(tbl->size / 2, flags);
+ if (ntbl == NULL)
+ return -ENOMEM;
+
+ ht->shift--;
+
+ /* Link each bucket in the new table to the first bucket
+ * in the old table that contains entries which will hash
+ * to the new bucket.
+ */
+ for (i = 0; i < ntbl->size; i++) {
+ ntbl->buckets[i] = tbl->buckets[i];
+
+		/* Walk to the end of the chain copied from the old
+		 * table, so that the entries from old bucket i + ntbl->size
+		 * can be appended to it below.
+		 */
+ for (pprev = &ntbl->buckets[i]; *pprev != NULL;
+ pprev = &rht_dereference(*pprev, ht)->next)
+ ;
+ RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+ }
+
+ /* Publish the new, valid hash table */
+ rcu_assign_pointer(ht->tbl, ntbl);
+
+ /* Wait for readers. No new readers will have references to the
+ * old hash table.
+ */
+ synchronize_rcu();
+
+ bucket_table_free(tbl);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_shrink);
+
+/**
+ * rhashtable_insert - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Will automatically grow the table via rhashtable_expand() if the
+ * grow_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ u32 hash;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ hash = head_hashfn(ht, obj, tbl->size);
+ RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+ ht->nelems++;
+
+ if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+ rhashtable_expand(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert);
+
+/**
+ * rhashtable_remove_pprev - remove object from hash table given previous element
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @pprev: pointer to previous element
+ * @flags: allocation flags (table expansion)
+ *
+ * Identical to rhashtable_remove() but the caller is already aware of the
+ * element in front of the element to be deleted. This is particularly useful
+ * for deletion when combined with walking or lookup.
+ */
+void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
+ struct rhash_head **pprev, gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+ ASSERT_RHT_MUTEX(ht);
+
+ RCU_INIT_POINTER(*pprev, obj->next);
+ ht->nelems--;
+
+ if (ht->p.shrink_decision &&
+ ht->p.shrink_decision(ht, tbl->size))
+ rhashtable_shrink(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+
+/**
+ * rhashtable_remove - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slower if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_shrink() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ u32 h;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ h = head_hashfn(ht, obj, tbl->size);
+
+ pprev = &tbl->buckets[h];
+ rht_for_each(he, tbl->buckets[h], ht) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rhashtable_remove_pprev(ht, he, pprev, flags);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove);
+
+/**
+ * rhashtable_lookup - lookup key in hash table
+ * @ht: hash table
+ * @key: pointer to key
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This lookup function may only be used for a fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ */
+void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+ u32 h;
+
+ BUG_ON(!ht->p.key_len);
+
+ h = __hashfn(ht, key, ht->p.key_len, tbl->size);
+ rht_for_each_rcu(he, tbl->buckets[h], ht) {
+ if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
+ ht->p.key_len))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup);
+
+/**
+ * rhashtable_lookup_compare - search hash table with compare function
+ * @ht: hash table
+ * @hash: hash value of desired entry
+ * @compare: compare function, must return true on match
+ * @arg: argument passed on to compare function
+ *
+ * Traverses the bucket chain behind the provided hash value and calls the
+ * specified compare function for each entry.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+ bool (*compare)(void *, void *), void *arg)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+
+ if (unlikely(hash >= tbl->size))
+ return NULL;
+
+ rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+ if (!compare(rht_obj(ht, he), arg))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+
+static size_t rounded_hashtable_size(unsigned int nelem)
+{
+ return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+}
+
+/**
+ * rhashtable_init - initialize a new hash table
+ * @ht: hash table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash table based on the provided configuration
+ * parameters. A table can be configured either with a variable or
+ * fixed length key:
+ *
+ * Configuration Example 1: Fixed length keys
+ * struct test_obj {
+ * int key;
+ * void * my_member;
+ * struct rhash_head node;
+ * };
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .key_offset = offsetof(struct test_obj, key),
+ * .key_len = sizeof(int),
+ * .hashfn = arch_fast_hash,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ *
+ * Configuration Example 2: Variable length keys
+ * struct test_obj {
+ * [...]
+ * struct rhash_head node;
+ * };
+ *
+ * u32 my_hash_fn(const void *data, u32 seed)
+ * {
+ * struct test_obj *obj = data;
+ *
+ * return [... hash ...];
+ * }
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .hashfn = arch_fast_hash,
+ * .obj_hashfn = my_hash_fn,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ */
+int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = HASH_DEFAULT_SIZE;
+
+ if ((params->key_len && !params->hashfn) ||
+ (!params->key_len && !params->obj_hashfn))
+ return -EINVAL;
+
+ if (params->nelem_hint)
+ size = rounded_hashtable_size(params->nelem_hint);
+
+ tbl = bucket_table_alloc(size, GFP_KERNEL);
+ if (tbl == NULL)
+ return -ENOMEM;
+
+ memset(ht, 0, sizeof(*ht));
+ ht->shift = ilog2(tbl->size);
+ memcpy(&ht->p, params, sizeof(*params));
+ RCU_INIT_POINTER(ht->tbl, tbl);
+
+ if (!ht->p.hash_rnd)
+ get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
+
+/**
+ * rhashtable_destroy - destroy hash table
+ * @ht: the hash table to destroy
+ *
+ * Frees the bucket array.
+ */
+void rhashtable_destroy(const struct rhashtable *ht)
+{
+ const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+ bucket_table_free(tbl);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#ifdef CONFIG_TEST_RHASHTABLE
+
+#define TEST_HT_SIZE 8
+#define TEST_ENTRIES 2048
+#define TEST_PTR ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS 4
+
+static int test_mutex_is_held(void)
+{
+ return 1;
+}
+
+struct test_obj {
+ void *ptr;
+ int value;
+ struct rhash_head node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ struct test_obj *obj;
+ bool expected = !(i % 2);
+ u32 key = i;
+
+ obj = rhashtable_lookup(ht, &key);
+
+ if (expected && !obj) {
+ pr_warn("Test failed: Could not find key %u\n", key);
+ return -ENOENT;
+ } else if (!expected && obj) {
+ pr_warn("Test failed: Unexpected entry found for key %u\n",
+ key);
+ return -EEXIST;
+ } else if (expected && obj) {
+ if (obj->ptr != TEST_PTR || obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+ obj->ptr, TEST_PTR, obj->value, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ bool quiet)
+{
+ unsigned int cnt, i, total = 0;
+ struct test_obj *obj;
+
+ for (i = 0; i < tbl->size; i++) {
+ cnt = 0;
+
+ if (!quiet)
+ pr_info(" [%#4x/%zu]", i, tbl->size);
+
+ rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+ cnt++;
+ total++;
+ if (!quiet)
+ pr_cont(" [%p],", obj);
+ }
+
+ if (!quiet)
+ pr_cont("\n [%#x] first element: %p, chain length: %u\n",
+ i, tbl->buckets[i], cnt);
+ }
+
+ pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
+ total, ht->nelems, TEST_ENTRIES);
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+ struct bucket_table *tbl;
+ struct test_obj *obj, *next;
+ int err;
+ unsigned int i;
+
+ /*
+ * Insertion Test:
+ * Insert TEST_ENTRIES into table with all keys even numbers
+ */
+ pr_info(" Adding %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ struct test_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ obj->ptr = TEST_PTR;
+ obj->value = i * 2;
+
+ rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ test_bucket_stats(ht, tbl, true);
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table expansion iteration %u...\n", i);
+ rhashtable_expand(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table shrinkage iteration %u...\n", i);
+ rhashtable_shrink(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ pr_info(" Deleting %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ u32 key = i * 2;
+
+ obj = rhashtable_lookup(ht, &key);
+ BUG_ON(!obj);
+
+ rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+ kfree(obj);
+ }
+
+ return 0;
+
+error:
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+ kfree(obj);
+
+ return err;
+}
+
+static int __init test_rht_init(void)
+{
+ struct rhashtable ht;
+ struct rhashtable_params params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = arch_fast_hash,
+ .mutex_is_held = &test_mutex_is_held,
+ .grow_decision = rht_grow_above_75,
+ .shrink_decision = rht_shrink_below_30,
+ };
+ int err;
+
+ pr_info("Running resizable hashtable tests...\n");
+
+ err = rhashtable_init(&ht, &params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ return err;
+ }
+
+ err = test_rhashtable(&ht);
+
+ rhashtable_destroy(&ht);
+
+ return err;
+}
+
+subsys_initcall(test_rht_init);
+
+#endif /* CONFIG_TEST_RHASHTABLE */
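
A condensed usage sketch for a fixed-length-key table, modelled on the self-test and the rhashtable_init() kerneldoc above; struct item, my_ht and my_ht_lock are illustrative names:

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct item {
	u32 key;
	struct rhash_head node;
};

static struct rhashtable my_ht;
static DEFINE_MUTEX(my_ht_lock);

static int my_ht_lock_is_held(void)
{
	return 1;	/* a real user would report the state of my_ht_lock */
}

static int my_ht_setup(void)
{
	struct rhashtable_params params = {
		.nelem_hint	 = 64,
		.head_offset	 = offsetof(struct item, node),
		.key_offset	 = offsetof(struct item, key),
		.key_len	 = sizeof(u32),
		.hashfn		 = arch_fast_hash,
		.mutex_is_held	 = &my_ht_lock_is_held,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&my_ht, &params);
}

static void my_ht_add(struct item *it)
{
	mutex_lock(&my_ht_lock);	/* mutations must be serialized */
	rhashtable_insert(&my_ht, &it->node, GFP_KERNEL);
	mutex_unlock(&my_ht_lock);
}

static struct item *my_ht_find(u32 key)
{
	/* caller must hold rcu_read_lock(); the entry is only guaranteed
	 * to stay valid within that read-side critical section
	 */
	return rhashtable_lookup(&my_ht, &key);
}
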
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3a8e8e8fb2a5..b4415fceb7e7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
+ * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
* @free_fn: Free function
*
* Description:
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- sg_free_fn *free_fn)
+ bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
- free_fn(sgl, alloc_size);
+		if (skip_first_chunk)
+			skip_first_chunk = false;
+		else
+			free_fn(sgl, alloc_size);
sgl = next;
}
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table);
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table);
*
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
- unsigned int max_ents, gfp_t gfp_mask,
- sg_alloc_fn *alloc_fn)
+ unsigned int max_ents, struct scatterlist *first_chunk,
+ gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
left -= sg_size;
- sg = alloc_fn(alloc_size, gfp_mask);
+ if (first_chunk) {
+ sg = first_chunk;
+ first_chunk = NULL;
+ } else {
+ sg = alloc_fn(alloc_size, gfp_mask);
+ }
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- gfp_mask, sg_kmalloc);
+ NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
return ret;
}
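
A sketch of the use case the new first_chunk/skip_first_chunk parameters enable: chaining additional chunks onto a scatterlist array embedded in a larger structure. The structure, allocator callbacks and NR_INLINE below are illustrative, not part of this patch:

#include <linux/scatterlist.h>
#include <linux/slab.h>

#define NR_INLINE 8	/* illustrative size of the embedded chunk */

struct my_req {
	struct sg_table table;
	struct scatterlist first_sgl[NR_INLINE];
};

static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp);
}

static void my_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	kfree(sgl);
}

static int my_req_map(struct my_req *req, unsigned int nents)
{
	/* The embedded chunk is consumed first; only larger requests
	 * chain further chunks allocated through my_sg_alloc().
	 */
	return __sg_alloc_table(&req->table, nents, NR_INLINE,
				req->first_sgl, GFP_ATOMIC, my_sg_alloc);
}

static void my_req_unmap(struct my_req *req)
{
	/* true: the embedded first chunk must not reach my_sg_free() */
	__sg_free_table(&req->table, NR_INLINE, true, my_sg_free);
}
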
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 04abe53f12a1..1afec32de6f2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,8 @@
#include <linux/kallsyms.h>
#include <linux/sched.h>
-notrace unsigned int debug_smp_processor_id(void)
+notrace static unsigned int check_preemption_disabled(const char *what1,
+ const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
- "code: %s/%d\n",
- preempt_count() - 1, current->comm, current->pid);
+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+ what1, what2, preempt_count() - 1, current->comm, current->pid);
+
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
@@ -50,5 +51,14 @@ out:
return this_cpu;
}
+notrace unsigned int debug_smp_processor_id(void)
+{
+ return check_preemption_disabled("smp_processor_id", "");
+}
EXPORT_SYMBOL(debug_smp_processor_id);
+notrace void __this_cpu_preempt_check(const char *op)
+{
+ check_preemption_disabled("__this_cpu_", op);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
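
A brief sketch of what the shared check catches once a companion change wires __this_cpu_preempt_check() into the __this_cpu_* macros; the per-cpu variable and function names are illustrative:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_count_event(void)
{
	/* Without the preempt_disable()/preempt_enable() pair, a
	 * __this_cpu_add() issued with CONFIG_DEBUG_PREEMPT enabled would
	 * land in __this_cpu_preempt_check() and print
	 * "BUG: using __this_cpu_add() in preemptible" via the shared
	 * check_preemption_disabled() helper above.
	 */
	preempt_disable();
	__this_cpu_add(my_counter, 1);
	preempt_enable();
}
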
diff --git a/lib/string.c b/lib/string.c
index 9b1f9062a202..992bf30af759 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL(strcpy);
#ifndef __HAVE_ARCH_STRNCPY
/**
- * strncpy - Copy a length-limited, %NUL-terminated string
+ * strncpy - Copy a length-limited, C-string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: The maximum number of bytes to copy
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(strncpy);
#ifndef __HAVE_ARCH_STRLCPY
/**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * strlcpy - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(strcat);
#ifndef __HAVE_ARCH_STRNCAT
/**
- * strncat - Append a length-limited, %NUL-terminated string to another
+ * strncat - Append a length-limited, C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The maximum numbers of bytes to copy
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(strncat);
#ifndef __HAVE_ARCH_STRLCAT
/**
- * strlcat - Append a length-limited, %NUL-terminated string to another
+ * strlcat - Append a length-limited, C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The size of the destination buffer.
@@ -301,6 +301,24 @@ char *strchr(const char *s, int c)
EXPORT_SYMBOL(strchr);
#endif
+#ifndef __HAVE_ARCH_STRCHRNUL
+/**
+ * strchrnul - Find and return a character in a string, or end of string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * Returns pointer to first occurrence of 'c' in s. If c is not found, then
+ * return a pointer to the null byte at the end of s.
+ */
+char *strchrnul(const char *s, int c)
+{
+ while (*s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchrnul);
+#endif
+
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
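
strchrnul(), added to lib/string.c above, never returns NULL: when the character is absent it returns a pointer to the terminating NUL. A small illustrative use that avoids the extra NULL test strchr() would need; first_token_len() is a made-up helper, not part of the patch.

#include <linux/string.h>
#include <linux/types.h>

/* Length of the text before the first ',', or of the whole string when
 * there is no ',' -- strchrnul() makes the NULL check unnecessary. */
static size_t first_token_len(const char *opts)
{
	return strchrnul(opts, ',') - opts;
}
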
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b604b831f4d1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;
/*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_list = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
- for (i = 0; i < io_tlb_nslabs; i++)
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_index = 0;
io_tlb_orig_addr = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
if (verbose)
swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!io_tlb_list)
goto cleanup3;
- for (i = 0; i < io_tlb_nslabs; i++)
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_index = 0;
-
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!io_tlb_orig_addr)
goto cleanup4;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
swiotlb_print_info();
@@ -374,7 +377,7 @@ void __init swiotlb_free(void)
io_tlb_nslabs = 0;
}
-static int is_swiotlb_buffer(phys_addr_t paddr)
+int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
/*
* First, sync the memory before unmapping the entry
*/
- if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
* Step 1: return the slots to the free list, merging the
 * slots with superseding slots
*/
- for (i = index + nslots - 1; i >= index; i--)
+ for (i = index + nslots - 1; i >= index; i--) {
io_tlb_list[i] = ++count;
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
+ if (orig_addr == INVALID_PHYS_ADDR)
+ return;
orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
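
The swiotlb hunks above stop using physical address 0 as the implicit "slot unused" marker: every io_tlb_orig_addr entry is initialized to, and reset to, INVALID_PHYS_ADDR, and sync/unmap only bounce when the entry differs from it. A sketch of the invariant this establishes; swiotlb_slot_is_mapped() is an illustrative helper, not part of the patch.

#include <linux/types.h>

#define INVALID_PHYS_ADDR	(~(phys_addr_t)0)

static bool swiotlb_slot_is_mapped(phys_addr_t orig_addr)
{
	/* Physical address 0 can be a legitimate bounce origin, so it cannot
	 * double as the free-slot marker; the all-ones value safely can. */
	return orig_addr != INVALID_PHYS_ADDR;
}
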
diff --git a/lib/syscall.c b/lib/syscall.c
index 58710eefeac8..e30e03932480 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno,
return 0;
}
-EXPORT_SYMBOL_GPL(task_current_syscall);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644
index 000000000000..89e0345733bd
--- /dev/null
+++ b/lib/test_bpf.c
@@ -0,0 +1,1929 @@
+/*
+ * Testsuite for BPF interpreter and BPF JIT compiler
+ *
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+/* General test specific settings */
+#define MAX_SUBTESTS 3
+#define MAX_TESTRUNS 10000
+#define MAX_DATA 128
+#define MAX_INSNS 512
+#define MAX_K 0xffffFFFF
+
+/* Few constants used to init test 'skb' */
+#define SKB_TYPE 3
+#define SKB_MARK 0x1234aaaa
+#define SKB_HASH 0x1234aaab
+#define SKB_QUEUE_MAP 123
+#define SKB_VLAN_TCI 0xffff
+#define SKB_DEV_IFINDEX 577
+#define SKB_DEV_TYPE 588
+
+/* Redefine REGs to make tests less verbose */
+#define R0 BPF_REG_0
+#define R1 BPF_REG_1
+#define R2 BPF_REG_2
+#define R3 BPF_REG_3
+#define R4 BPF_REG_4
+#define R5 BPF_REG_5
+#define R6 BPF_REG_6
+#define R7 BPF_REG_7
+#define R8 BPF_REG_8
+#define R9 BPF_REG_9
+#define R10 BPF_REG_10
+
+/* Flags that can be passed to test cases */
+#define FLAG_NO_DATA BIT(0)
+#define FLAG_EXPECTED_FAIL BIT(1)
+
+enum {
+ CLASSIC = BIT(6), /* Old BPF instructions only. */
+ INTERNAL = BIT(7), /* Extended instruction set. */
+};
+
+#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
+
+struct bpf_test {
+ const char *descr;
+ union {
+ struct sock_filter insns[MAX_INSNS];
+ struct bpf_insn insns_int[MAX_INSNS];
+ } u;
+ __u8 aux;
+ __u8 data[MAX_DATA];
+ struct {
+ int data_size;
+ __u32 result;
+ } test[MAX_SUBTESTS];
+};
+
+static struct bpf_test tests[] = {
+ {
+ "TAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 2, 10 }, { 3, 20 }, { 4, 30 } },
+ },
+ {
+ "TXA",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 1, 2 }, { 3, 6 }, { 4, 8 } },
+ },
+ {
+ "ADD_SUB_MUL_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "DIV_KX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 8),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x40000001 } }
+ },
+ {
+ "AND_OR_LSH_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+ BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x800000ff }, { 1, 0x800000ff } },
+ },
+ {
+ "LD_IMM_0",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 1 } },
+ },
+ {
+ "LD_IND",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3 },
+ { { 1, 0 }, { 2, 3 } },
+ },
+ {
+ "LD_IND_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3, 0xff },
+ { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+ },
+ {
+ "LD_ABS_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 15, 0 }, { 16, 3 } },
+ },
+ {
+ "LD_IND_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 14, 0 }, { 15, 1 }, { 17, 3 } },
+ },
+ {
+ "LD_PKTTYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 3 }, { 10, 3 } },
+ },
+ {
+ "LD_MARK",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MARK),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_MARK}, { 10, SKB_MARK} },
+ },
+ {
+ "LD_RXHASH",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_RXHASH),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_HASH}, { 10, SKB_HASH} },
+ },
+ {
+ "LD_QUEUE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_QUEUE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+ },
+ {
+ "LD_PROTOCOL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PROTOCOL),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30 },
+ { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+ },
+ {
+ "LD_VLAN_TAG",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
+ { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+ },
+ },
+ {
+ "LD_VLAN_TAG_PRESENT",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+ { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ },
+ },
+ {
+ "LD_IFINDEX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_IFINDEX),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+ },
+ {
+ "LD_HATYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_HATYPE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+ },
+ {
+ "LD_CPU",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 } },
+ },
+ {
+ "LD_NLATTR",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 6 } },
+ },
+ {
+ "LD_NLATTR_NEST",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 10 } },
+ },
+ {
+ "LD_PAYLOAD_OFF",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
+ * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+ * id 9737, seq 1, length 64
+ */
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+ 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+ { { 30, 0 }, { 100, 42 } },
+ },
+ {
+ "LD_ANC_XOR",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 300),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
+ },
+ {
+ "SPILL_FILL",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_RSH, 1),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+ BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
+ BPF_STMT(BPF_STX, 15), /* M3 = len */
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_LD | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+ },
+ {
+ "JEQ",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 3, 3, 3, 3, 3 },
+ { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGT",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGE",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 40),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 1, 2, 3, 4, 5 },
+ { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+ },
+ {
+ "JSET",
+ .u.insns = {
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 0, 0xAA, 0x55, 1 },
+ { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+ },
+ {
+ "tcpdump port 22",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+ /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
+ * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+ * seq 1305692979:1305693027, ack 3650467037, win 65535,
+ * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+ */
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "tcpdump complex",
+ .u.insns = {
+ /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+ * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+ * (len > 115 or len < 30000000000)' -d
+ */
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
+ BPF_STMT(BPF_ST, 1),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ST, 5),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 5),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "RET_A",
+ .u.insns = {
+ /* check that uninitialized X and A contain zeros */
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { {1, 0}, {2, 0} },
+ },
+ {
+ "INT: ADD trivial",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_IMM(BPF_ADD, R1, -1),
+ BPF_ALU64_IMM(BPF_MUL, R1, 3),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "INT: MUL_X",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL_X2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU32_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL32_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU32_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ /* Have to test all register combinations, since
+ * JITing of different registers will produce
+ * different asm code.
+ */
+ "INT: ADD 64-bit",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 20),
+ BPF_ALU64_IMM(BPF_ADD, R1, 20),
+ BPF_ALU64_IMM(BPF_ADD, R2, 20),
+ BPF_ALU64_IMM(BPF_ADD, R3, 20),
+ BPF_ALU64_IMM(BPF_ADD, R4, 20),
+ BPF_ALU64_IMM(BPF_ADD, R5, 20),
+ BPF_ALU64_IMM(BPF_ADD, R6, 20),
+ BPF_ALU64_IMM(BPF_ADD, R7, 20),
+ BPF_ALU64_IMM(BPF_ADD, R8, 20),
+ BPF_ALU64_IMM(BPF_ADD, R9, 20),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R1, R0),
+ BPF_ALU64_REG(BPF_ADD, R1, R1),
+ BPF_ALU64_REG(BPF_ADD, R1, R2),
+ BPF_ALU64_REG(BPF_ADD, R1, R3),
+ BPF_ALU64_REG(BPF_ADD, R1, R4),
+ BPF_ALU64_REG(BPF_ADD, R1, R5),
+ BPF_ALU64_REG(BPF_ADD, R1, R6),
+ BPF_ALU64_REG(BPF_ADD, R1, R7),
+ BPF_ALU64_REG(BPF_ADD, R1, R8),
+ BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R2, R0),
+ BPF_ALU64_REG(BPF_ADD, R2, R1),
+ BPF_ALU64_REG(BPF_ADD, R2, R2),
+ BPF_ALU64_REG(BPF_ADD, R2, R3),
+ BPF_ALU64_REG(BPF_ADD, R2, R4),
+ BPF_ALU64_REG(BPF_ADD, R2, R5),
+ BPF_ALU64_REG(BPF_ADD, R2, R6),
+ BPF_ALU64_REG(BPF_ADD, R2, R7),
+ BPF_ALU64_REG(BPF_ADD, R2, R8),
+ BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R3, R0),
+ BPF_ALU64_REG(BPF_ADD, R3, R1),
+ BPF_ALU64_REG(BPF_ADD, R3, R2),
+ BPF_ALU64_REG(BPF_ADD, R3, R3),
+ BPF_ALU64_REG(BPF_ADD, R3, R4),
+ BPF_ALU64_REG(BPF_ADD, R3, R5),
+ BPF_ALU64_REG(BPF_ADD, R3, R6),
+ BPF_ALU64_REG(BPF_ADD, R3, R7),
+ BPF_ALU64_REG(BPF_ADD, R3, R8),
+ BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R4, R0),
+ BPF_ALU64_REG(BPF_ADD, R4, R1),
+ BPF_ALU64_REG(BPF_ADD, R4, R2),
+ BPF_ALU64_REG(BPF_ADD, R4, R3),
+ BPF_ALU64_REG(BPF_ADD, R4, R4),
+ BPF_ALU64_REG(BPF_ADD, R4, R5),
+ BPF_ALU64_REG(BPF_ADD, R4, R6),
+ BPF_ALU64_REG(BPF_ADD, R4, R7),
+ BPF_ALU64_REG(BPF_ADD, R4, R8),
+ BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R5, R0),
+ BPF_ALU64_REG(BPF_ADD, R5, R1),
+ BPF_ALU64_REG(BPF_ADD, R5, R2),
+ BPF_ALU64_REG(BPF_ADD, R5, R3),
+ BPF_ALU64_REG(BPF_ADD, R5, R4),
+ BPF_ALU64_REG(BPF_ADD, R5, R5),
+ BPF_ALU64_REG(BPF_ADD, R5, R6),
+ BPF_ALU64_REG(BPF_ADD, R5, R7),
+ BPF_ALU64_REG(BPF_ADD, R5, R8),
+ BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R6, R0),
+ BPF_ALU64_REG(BPF_ADD, R6, R1),
+ BPF_ALU64_REG(BPF_ADD, R6, R2),
+ BPF_ALU64_REG(BPF_ADD, R6, R3),
+ BPF_ALU64_REG(BPF_ADD, R6, R4),
+ BPF_ALU64_REG(BPF_ADD, R6, R5),
+ BPF_ALU64_REG(BPF_ADD, R6, R6),
+ BPF_ALU64_REG(BPF_ADD, R6, R7),
+ BPF_ALU64_REG(BPF_ADD, R6, R8),
+ BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R7, R0),
+ BPF_ALU64_REG(BPF_ADD, R7, R1),
+ BPF_ALU64_REG(BPF_ADD, R7, R2),
+ BPF_ALU64_REG(BPF_ADD, R7, R3),
+ BPF_ALU64_REG(BPF_ADD, R7, R4),
+ BPF_ALU64_REG(BPF_ADD, R7, R5),
+ BPF_ALU64_REG(BPF_ADD, R7, R6),
+ BPF_ALU64_REG(BPF_ADD, R7, R7),
+ BPF_ALU64_REG(BPF_ADD, R7, R8),
+ BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R8, R0),
+ BPF_ALU64_REG(BPF_ADD, R8, R1),
+ BPF_ALU64_REG(BPF_ADD, R8, R2),
+ BPF_ALU64_REG(BPF_ADD, R8, R3),
+ BPF_ALU64_REG(BPF_ADD, R8, R4),
+ BPF_ALU64_REG(BPF_ADD, R8, R5),
+ BPF_ALU64_REG(BPF_ADD, R8, R6),
+ BPF_ALU64_REG(BPF_ADD, R8, R7),
+ BPF_ALU64_REG(BPF_ADD, R8, R8),
+ BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R9, R0),
+ BPF_ALU64_REG(BPF_ADD, R9, R1),
+ BPF_ALU64_REG(BPF_ADD, R9, R2),
+ BPF_ALU64_REG(BPF_ADD, R9, R3),
+ BPF_ALU64_REG(BPF_ADD, R9, R4),
+ BPF_ALU64_REG(BPF_ADD, R9, R5),
+ BPF_ALU64_REG(BPF_ADD, R9, R6),
+ BPF_ALU64_REG(BPF_ADD, R9, R7),
+ BPF_ALU64_REG(BPF_ADD, R9, R8),
+ BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU64_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ {
+ "INT: ADD 32-bit",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 20),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_IMM(BPF_MOV, R3, 3),
+ BPF_ALU32_IMM(BPF_MOV, R4, 4),
+ BPF_ALU32_IMM(BPF_MOV, R5, 5),
+ BPF_ALU32_IMM(BPF_MOV, R6, 6),
+ BPF_ALU32_IMM(BPF_MOV, R7, 7),
+ BPF_ALU32_IMM(BPF_MOV, R8, 8),
+ BPF_ALU32_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R1, 10),
+ BPF_ALU64_IMM(BPF_ADD, R2, 10),
+ BPF_ALU64_IMM(BPF_ADD, R3, 10),
+ BPF_ALU64_IMM(BPF_ADD, R4, 10),
+ BPF_ALU64_IMM(BPF_ADD, R5, 10),
+ BPF_ALU64_IMM(BPF_ADD, R6, 10),
+ BPF_ALU64_IMM(BPF_ADD, R7, 10),
+ BPF_ALU64_IMM(BPF_ADD, R8, 10),
+ BPF_ALU64_IMM(BPF_ADD, R9, 10),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_ALU32_REG(BPF_ADD, R0, R2),
+ BPF_ALU32_REG(BPF_ADD, R0, R3),
+ BPF_ALU32_REG(BPF_ADD, R0, R4),
+ BPF_ALU32_REG(BPF_ADD, R0, R5),
+ BPF_ALU32_REG(BPF_ADD, R0, R6),
+ BPF_ALU32_REG(BPF_ADD, R0, R7),
+ BPF_ALU32_REG(BPF_ADD, R0, R8),
+ BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R1, R0),
+ BPF_ALU32_REG(BPF_ADD, R1, R1),
+ BPF_ALU32_REG(BPF_ADD, R1, R2),
+ BPF_ALU32_REG(BPF_ADD, R1, R3),
+ BPF_ALU32_REG(BPF_ADD, R1, R4),
+ BPF_ALU32_REG(BPF_ADD, R1, R5),
+ BPF_ALU32_REG(BPF_ADD, R1, R6),
+ BPF_ALU32_REG(BPF_ADD, R1, R7),
+ BPF_ALU32_REG(BPF_ADD, R1, R8),
+ BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R2, R0),
+ BPF_ALU32_REG(BPF_ADD, R2, R1),
+ BPF_ALU32_REG(BPF_ADD, R2, R2),
+ BPF_ALU32_REG(BPF_ADD, R2, R3),
+ BPF_ALU32_REG(BPF_ADD, R2, R4),
+ BPF_ALU32_REG(BPF_ADD, R2, R5),
+ BPF_ALU32_REG(BPF_ADD, R2, R6),
+ BPF_ALU32_REG(BPF_ADD, R2, R7),
+ BPF_ALU32_REG(BPF_ADD, R2, R8),
+ BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R3, R0),
+ BPF_ALU32_REG(BPF_ADD, R3, R1),
+ BPF_ALU32_REG(BPF_ADD, R3, R2),
+ BPF_ALU32_REG(BPF_ADD, R3, R3),
+ BPF_ALU32_REG(BPF_ADD, R3, R4),
+ BPF_ALU32_REG(BPF_ADD, R3, R5),
+ BPF_ALU32_REG(BPF_ADD, R3, R6),
+ BPF_ALU32_REG(BPF_ADD, R3, R7),
+ BPF_ALU32_REG(BPF_ADD, R3, R8),
+ BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R4, R0),
+ BPF_ALU32_REG(BPF_ADD, R4, R1),
+ BPF_ALU32_REG(BPF_ADD, R4, R2),
+ BPF_ALU32_REG(BPF_ADD, R4, R3),
+ BPF_ALU32_REG(BPF_ADD, R4, R4),
+ BPF_ALU32_REG(BPF_ADD, R4, R5),
+ BPF_ALU32_REG(BPF_ADD, R4, R6),
+ BPF_ALU32_REG(BPF_ADD, R4, R7),
+ BPF_ALU32_REG(BPF_ADD, R4, R8),
+ BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R5, R0),
+ BPF_ALU32_REG(BPF_ADD, R5, R1),
+ BPF_ALU32_REG(BPF_ADD, R5, R2),
+ BPF_ALU32_REG(BPF_ADD, R5, R3),
+ BPF_ALU32_REG(BPF_ADD, R5, R4),
+ BPF_ALU32_REG(BPF_ADD, R5, R5),
+ BPF_ALU32_REG(BPF_ADD, R5, R6),
+ BPF_ALU32_REG(BPF_ADD, R5, R7),
+ BPF_ALU32_REG(BPF_ADD, R5, R8),
+ BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R6, R0),
+ BPF_ALU32_REG(BPF_ADD, R6, R1),
+ BPF_ALU32_REG(BPF_ADD, R6, R2),
+ BPF_ALU32_REG(BPF_ADD, R6, R3),
+ BPF_ALU32_REG(BPF_ADD, R6, R4),
+ BPF_ALU32_REG(BPF_ADD, R6, R5),
+ BPF_ALU32_REG(BPF_ADD, R6, R6),
+ BPF_ALU32_REG(BPF_ADD, R6, R7),
+ BPF_ALU32_REG(BPF_ADD, R6, R8),
+ BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R7, R0),
+ BPF_ALU32_REG(BPF_ADD, R7, R1),
+ BPF_ALU32_REG(BPF_ADD, R7, R2),
+ BPF_ALU32_REG(BPF_ADD, R7, R3),
+ BPF_ALU32_REG(BPF_ADD, R7, R4),
+ BPF_ALU32_REG(BPF_ADD, R7, R5),
+ BPF_ALU32_REG(BPF_ADD, R7, R6),
+ BPF_ALU32_REG(BPF_ADD, R7, R7),
+ BPF_ALU32_REG(BPF_ADD, R7, R8),
+ BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R8, R0),
+ BPF_ALU32_REG(BPF_ADD, R8, R1),
+ BPF_ALU32_REG(BPF_ADD, R8, R2),
+ BPF_ALU32_REG(BPF_ADD, R8, R3),
+ BPF_ALU32_REG(BPF_ADD, R8, R4),
+ BPF_ALU32_REG(BPF_ADD, R8, R5),
+ BPF_ALU32_REG(BPF_ADD, R8, R6),
+ BPF_ALU32_REG(BPF_ADD, R8, R7),
+ BPF_ALU32_REG(BPF_ADD, R8, R8),
+ BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R9, R0),
+ BPF_ALU32_REG(BPF_ADD, R9, R1),
+ BPF_ALU32_REG(BPF_ADD, R9, R2),
+ BPF_ALU32_REG(BPF_ADD, R9, R3),
+ BPF_ALU32_REG(BPF_ADD, R9, R4),
+ BPF_ALU32_REG(BPF_ADD, R9, R5),
+ BPF_ALU32_REG(BPF_ADD, R9, R6),
+ BPF_ALU32_REG(BPF_ADD, R9, R7),
+ BPF_ALU32_REG(BPF_ADD, R9, R8),
+ BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU32_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: SUB",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R0),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_SUB, R1, R3),
+ BPF_ALU64_REG(BPF_SUB, R1, R4),
+ BPF_ALU64_REG(BPF_SUB, R1, R5),
+ BPF_ALU64_REG(BPF_SUB, R1, R6),
+ BPF_ALU64_REG(BPF_SUB, R1, R7),
+ BPF_ALU64_REG(BPF_SUB, R1, R8),
+ BPF_ALU64_REG(BPF_SUB, R1, R9),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_REG(BPF_SUB, R2, R0),
+ BPF_ALU64_REG(BPF_SUB, R2, R1),
+ BPF_ALU64_REG(BPF_SUB, R2, R3),
+ BPF_ALU64_REG(BPF_SUB, R2, R4),
+ BPF_ALU64_REG(BPF_SUB, R2, R5),
+ BPF_ALU64_REG(BPF_SUB, R2, R6),
+ BPF_ALU64_REG(BPF_SUB, R2, R7),
+ BPF_ALU64_REG(BPF_SUB, R2, R8),
+ BPF_ALU64_REG(BPF_SUB, R2, R9),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_REG(BPF_SUB, R3, R0),
+ BPF_ALU64_REG(BPF_SUB, R3, R1),
+ BPF_ALU64_REG(BPF_SUB, R3, R2),
+ BPF_ALU64_REG(BPF_SUB, R3, R4),
+ BPF_ALU64_REG(BPF_SUB, R3, R5),
+ BPF_ALU64_REG(BPF_SUB, R3, R6),
+ BPF_ALU64_REG(BPF_SUB, R3, R7),
+ BPF_ALU64_REG(BPF_SUB, R3, R8),
+ BPF_ALU64_REG(BPF_SUB, R3, R9),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_REG(BPF_SUB, R4, R0),
+ BPF_ALU64_REG(BPF_SUB, R4, R1),
+ BPF_ALU64_REG(BPF_SUB, R4, R2),
+ BPF_ALU64_REG(BPF_SUB, R4, R3),
+ BPF_ALU64_REG(BPF_SUB, R4, R5),
+ BPF_ALU64_REG(BPF_SUB, R4, R6),
+ BPF_ALU64_REG(BPF_SUB, R4, R7),
+ BPF_ALU64_REG(BPF_SUB, R4, R8),
+ BPF_ALU64_REG(BPF_SUB, R4, R9),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_REG(BPF_SUB, R5, R0),
+ BPF_ALU64_REG(BPF_SUB, R5, R1),
+ BPF_ALU64_REG(BPF_SUB, R5, R2),
+ BPF_ALU64_REG(BPF_SUB, R5, R3),
+ BPF_ALU64_REG(BPF_SUB, R5, R4),
+ BPF_ALU64_REG(BPF_SUB, R5, R6),
+ BPF_ALU64_REG(BPF_SUB, R5, R7),
+ BPF_ALU64_REG(BPF_SUB, R5, R8),
+ BPF_ALU64_REG(BPF_SUB, R5, R9),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_REG(BPF_SUB, R6, R0),
+ BPF_ALU64_REG(BPF_SUB, R6, R1),
+ BPF_ALU64_REG(BPF_SUB, R6, R2),
+ BPF_ALU64_REG(BPF_SUB, R6, R3),
+ BPF_ALU64_REG(BPF_SUB, R6, R4),
+ BPF_ALU64_REG(BPF_SUB, R6, R5),
+ BPF_ALU64_REG(BPF_SUB, R6, R7),
+ BPF_ALU64_REG(BPF_SUB, R6, R8),
+ BPF_ALU64_REG(BPF_SUB, R6, R9),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_REG(BPF_SUB, R7, R0),
+ BPF_ALU64_REG(BPF_SUB, R7, R1),
+ BPF_ALU64_REG(BPF_SUB, R7, R2),
+ BPF_ALU64_REG(BPF_SUB, R7, R3),
+ BPF_ALU64_REG(BPF_SUB, R7, R4),
+ BPF_ALU64_REG(BPF_SUB, R7, R5),
+ BPF_ALU64_REG(BPF_SUB, R7, R6),
+ BPF_ALU64_REG(BPF_SUB, R7, R8),
+ BPF_ALU64_REG(BPF_SUB, R7, R9),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_REG(BPF_SUB, R8, R0),
+ BPF_ALU64_REG(BPF_SUB, R8, R1),
+ BPF_ALU64_REG(BPF_SUB, R8, R2),
+ BPF_ALU64_REG(BPF_SUB, R8, R3),
+ BPF_ALU64_REG(BPF_SUB, R8, R4),
+ BPF_ALU64_REG(BPF_SUB, R8, R5),
+ BPF_ALU64_REG(BPF_SUB, R8, R6),
+ BPF_ALU64_REG(BPF_SUB, R8, R7),
+ BPF_ALU64_REG(BPF_SUB, R8, R9),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_REG(BPF_SUB, R9, R0),
+ BPF_ALU64_REG(BPF_SUB, R9, R1),
+ BPF_ALU64_REG(BPF_SUB, R9, R2),
+ BPF_ALU64_REG(BPF_SUB, R9, R3),
+ BPF_ALU64_REG(BPF_SUB, R9, R4),
+ BPF_ALU64_REG(BPF_SUB, R9, R5),
+ BPF_ALU64_REG(BPF_SUB, R9, R6),
+ BPF_ALU64_REG(BPF_SUB, R9, R7),
+ BPF_ALU64_REG(BPF_SUB, R9, R8),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 11 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: XOR",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_XOR, R1, R1),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R2, R2),
+ BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R2, R2),
+ BPF_ALU64_REG(BPF_XOR, R3, R3),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R3, R3),
+ BPF_ALU64_REG(BPF_XOR, R4, R4),
+ BPF_ALU64_IMM(BPF_MOV, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R5, -1),
+ BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R4, R4),
+ BPF_ALU64_REG(BPF_XOR, R5, R5),
+ BPF_ALU64_IMM(BPF_MOV, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R5, 1),
+ BPF_ALU64_REG(BPF_SUB, R5, R5),
+ BPF_ALU64_REG(BPF_XOR, R6, R6),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R8, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R6, R6),
+ BPF_ALU64_REG(BPF_XOR, R7, R7),
+ BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R7, R7),
+ BPF_ALU64_REG(BPF_XOR, R8, R8),
+ BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R8, R8),
+ BPF_ALU64_REG(BPF_XOR, R9, R9),
+ BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R9, R9),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: MUL",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_MUL, R0, R0),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_REG(BPF_MUL, R0, R2),
+ BPF_ALU64_REG(BPF_MUL, R0, R3),
+ BPF_ALU64_REG(BPF_MUL, R0, R4),
+ BPF_ALU64_REG(BPF_MUL, R0, R5),
+ BPF_ALU64_REG(BPF_MUL, R0, R6),
+ BPF_ALU64_REG(BPF_MUL, R0, R7),
+ BPF_ALU64_REG(BPF_MUL, R0, R8),
+ BPF_ALU64_REG(BPF_MUL, R0, R9),
+ BPF_ALU64_IMM(BPF_MUL, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R1, R0),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_REG(BPF_MUL, R1, R3),
+ BPF_ALU64_REG(BPF_MUL, R1, R4),
+ BPF_ALU64_REG(BPF_MUL, R1, R5),
+ BPF_ALU64_REG(BPF_MUL, R1, R6),
+ BPF_ALU64_REG(BPF_MUL, R1, R7),
+ BPF_ALU64_REG(BPF_MUL, R1, R8),
+ BPF_ALU64_REG(BPF_MUL, R1, R9),
+ BPF_ALU64_IMM(BPF_MUL, R1, 10),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_LSH, R1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R2, R0),
+ BPF_ALU64_REG(BPF_MUL, R2, R1),
+ BPF_ALU64_REG(BPF_MUL, R2, R3),
+ BPF_ALU64_REG(BPF_MUL, R2, R4),
+ BPF_ALU64_REG(BPF_MUL, R2, R5),
+ BPF_ALU64_REG(BPF_MUL, R2, R6),
+ BPF_ALU64_REG(BPF_MUL, R2, R7),
+ BPF_ALU64_REG(BPF_MUL, R2, R8),
+ BPF_ALU64_REG(BPF_MUL, R2, R9),
+ BPF_ALU64_IMM(BPF_MUL, R2, 10),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_MOV, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x35d97ef2 } }
+ },
+ {
+ "INT: ALU MIX",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_ADD, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_XOR, R2, 3),
+ BPF_ALU64_REG(BPF_DIV, R0, R2),
+ BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOD, R0, 3),
+ BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "INT: DIV + ABS",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R6, R1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_REG(BPF_DIV, R0, R2),
+ BPF_ALU64_REG(BPF_MOV, R8, R0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, R8, R0),
+ BPF_LD_IND(BPF_B, R8, -70),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 10, 20, 30, 40, 50 },
+ { { 4, 0 }, { 5, 10 } }
+ },
+ {
+ "INT: DIV by zero",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R6, R1),
+ BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, R0, R7),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 10, 20, 30, 40, 50 },
+ { { 3, 0 }, { 4, 0 } }
+ },
+ {
+ "check: missing ret",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: div_k_0",
+ .u.insns = {
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: unknown insn",
+ .u.insns = {
+ /* seccomp insn, rejected in socket filter */
+ BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: out of range spill/fill",
+ .u.insns = {
+ BPF_STMT(BPF_STX, 16),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "JUMPS + HOLES",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC,
+ { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
+ 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
+ 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
+ 0xc0, 0xa8, 0x33, 0x01,
+ 0xc0, 0xa8, 0x33, 0x02,
+ 0xbb, 0xb6,
+ 0xa9, 0xfa,
+ 0x00, 0x14, 0x00, 0x00,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc },
+ { { 88, 0x001b } }
+ },
+ {
+ "check: RET X",
+ .u.insns = {
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ {
+ "check: LDX + RET X",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 42),
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: alt STX + LDX",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 100),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 116 } },
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: full STX + full LDX",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x2a5a5e5 } },
+ },
+ {
+ "check: SKF_AD_MAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MAX),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ { /* Passes checker but fails during runtime. */
+ "LD [SKF_AD_OFF-1]",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF - 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 } },
+ },
+};
+
+static struct net_device dev;
+
+static struct sk_buff *populate_skb(char *buf, int size)
+{
+ struct sk_buff *skb;
+
+ if (size >= MAX_DATA)
+ return NULL;
+
+ skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memcpy(__skb_put(skb, size), buf, size);
+
+ /* Initialize a fake skb with test pattern. */
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = SKB_TYPE;
+ skb->mark = SKB_MARK;
+ skb->hash = SKB_HASH;
+ skb->queue_mapping = SKB_QUEUE_MAP;
+ skb->vlan_tci = SKB_VLAN_TCI;
+ skb->dev = &dev;
+ skb->dev->ifindex = SKB_DEV_IFINDEX;
+ skb->dev->type = SKB_DEV_TYPE;
+ skb_set_network_header(skb, min(size, ETH_HLEN));
+
+ return skb;
+}
+
+static void *generate_test_data(struct bpf_test *test, int sub)
+{
+ if (test->aux & FLAG_NO_DATA)
+ return NULL;
+
+ /* Test case expects an skb, so populate one. Various
+ * subtests generate skbs of different sizes based on
+ * the same data.
+ */
+ return populate_skb(test->data, test->test[sub].data_size);
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+ if (test->aux & FLAG_NO_DATA)
+ return;
+
+ kfree_skb(data);
+}
+
+static int probe_filter_length(struct sock_filter *fp)
+{
+ int len = 0;
+
+ for (len = MAX_INSNS - 1; len > 0; --len)
+ if (fp[len].code != 0 || fp[len].k != 0)
+ break;
+
+ return len + 1;
+}
+
+static struct bpf_prog *generate_filter(int which, int *err)
+{
+ struct bpf_prog *fp;
+ struct sock_fprog_kern fprog;
+ unsigned int flen = probe_filter_length(tests[which].u.insns);
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+ switch (test_type) {
+ case CLASSIC:
+ fprog.filter = tests[which].u.insns;
+ fprog.len = flen;
+
+ *err = bpf_prog_create(&fp, &fprog);
+ if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+ if (*err == -EINVAL) {
+ pr_cont("PASS\n");
+ /* Verifier rejected filter as expected. */
+ *err = 0;
+ return NULL;
+ } else {
+ pr_cont("UNEXPECTED_PASS\n");
+ /* Verifier didn't reject the test; that's
+ * bad enough, just return!
+ */
+ *err = -EINVAL;
+ return NULL;
+ }
+ }
+ /* We don't expect to fail. */
+ if (*err) {
+ pr_cont("FAIL to attach err=%d len=%d\n",
+ *err, fprog.len);
+ return NULL;
+ }
+ break;
+
+ case INTERNAL:
+ fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+ if (fp == NULL) {
+ pr_cont("UNEXPECTED_FAIL no memory left\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ fp->len = flen;
+ memcpy(fp->insnsi, tests[which].u.insns_int,
+ fp->len * sizeof(struct bpf_insn));
+
+ bpf_prog_select_runtime(fp);
+ break;
+ }
+
+ *err = 0;
+ return fp;
+}
+
+static void release_filter(struct bpf_prog *fp, int which)
+{
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+ switch (test_type) {
+ case CLASSIC:
+ bpf_prog_destroy(fp);
+ break;
+ case INTERNAL:
+ bpf_prog_free(fp);
+ break;
+ }
+}
+
+static int __run_one(const struct bpf_prog *fp, const void *data,
+ int runs, u64 *duration)
+{
+ u64 start, finish;
+ int ret, i;
+
+ start = ktime_to_us(ktime_get());
+
+ for (i = 0; i < runs; i++)
+ ret = BPF_PROG_RUN(fp, data);
+
+ finish = ktime_to_us(ktime_get());
+
+ *duration = (finish - start) * 1000ULL;
+ do_div(*duration, runs);
+
+ return ret;
+}
+
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
+{
+ int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+ for (i = 0; i < MAX_SUBTESTS; i++) {
+ void *data;
+ u64 duration;
+ u32 ret;
+
+ if (test->test[i].data_size == 0 &&
+ test->test[i].result == 0)
+ break;
+
+ data = generate_test_data(test, i);
+ ret = __run_one(fp, data, runs, &duration);
+ release_test_data(test, data);
+
+ if (ret == test->test[i].result) {
+ pr_cont("%lld ", duration);
+ } else {
+ pr_cont("ret %d != %d ", ret,
+ test->test[i].result);
+ err_cnt++;
+ }
+ }
+
+ return err_cnt;
+}
+
+static __init int test_bpf(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_prog *fp;
+ int err;
+
+ pr_info("#%d %s ", i, tests[i].descr);
+
+ fp = generate_filter(i, &err);
+ if (fp == NULL) {
+ if (err == 0) {
+ pass_cnt++;
+ continue;
+ }
+
+ return err;
+ }
+ err = run_one(fp, &tests[i]);
+ release_filter(fp, i);
+
+ if (err) {
+ pr_cont("FAIL (%d times)\n", err);
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
+static int __init test_bpf_init(void)
+{
+ return test_bpf();
+}
+
+static void __exit test_bpf_exit(void)
+{
+}
+
+module_init(test_bpf_init);
+module_exit(test_bpf_exit);
+
+MODULE_LICENSE("GPL");
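
The harness above treats the two program flavours differently: CLASSIC entries are attached through bpf_prog_create() and the checker (FLAG_EXPECTED_FAIL marks filters the checker must reject), while INTERNAL entries are copied straight into a bpf_prog and run without validation. Each tests[] record bundles a description, the instructions, the aux flags, optional packet bytes and up to MAX_SUBTESTS (data_size, result) pairs. For illustration, a hypothetical extra CLASSIC entry in the same shape as the records visible above could look like this (not part of the patch; the field order descr/u/aux/data/test is assumed from those records):

	{	/* Hypothetical example entry -- illustrative only. */
		"LD_LEN example",
		.u.insns = {
			/* A = skb->len, then return A. */
			BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC,
		{ },			/* 14 zero bytes of data are enough */
		{ { 14, 14 } },		/* data_size 14 -> expected result 14 */
	},

run_one() would then build a 14-byte skb via populate_skb(), execute the filter MAX_TESTRUNS times through __run_one() and compare the returned value against 14.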
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000000..86374c1c49a4
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_firmware)
+ rc = simple_read_from_buffer(buf, size, offset,
+ test_firmware->data,
+ test_firmware->size);
+ mutex_unlock(&test_fw_mutex);
+ return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+ .owner = THIS_MODULE,
+ .read = test_fw_misc_read,
+};
+
+static struct miscdevice test_fw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "test_firmware",
+ .fops = &test_fw_fops,
+};
+
+static ssize_t trigger_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kzalloc(count + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOSPC;
+ memcpy(name, buf, count);
+
+ pr_info("loading '%s'\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ test_firmware = NULL;
+ rc = request_firmware(&test_firmware, name, dev);
+ if (rc)
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+ mutex_unlock(&test_fw_mutex);
+
+ kfree(name);
+
+ return count;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+static int __init test_firmware_init(void)
+{
+ int rc;
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+ }
+ rc = device_create_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ if (rc) {
+ pr_err("could not create sysfs interface: %d\n", rc);
+ goto dereg;
+ }
+
+ pr_warn("interface ready\n");
+
+ return 0;
+dereg:
+ misc_deregister(&test_fw_misc_device);
+ return rc;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+ release_firmware(test_firmware);
+ device_remove_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ misc_deregister(&test_fw_misc_device);
+ pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
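
The module wires up two user-visible pieces: writing a firmware name to the trigger_request sysfs attribute makes the kernel call request_firmware() with that name, and reading the test_firmware misc device returns the bytes of whatever blob is currently held. A hypothetical userspace exercise of that interface (the sysfs path assumes the usual /sys/devices/virtual/misc/<name>/ location for misc devices, and "my-fw.bin" is an arbitrary file that would have to exist in the firmware search path):

/* Illustrative userspace test client -- not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "my-fw.bin";
	char buf[4096];
	ssize_t n;
	int fd;

	/* Ask the module to request_firmware() the named blob. */
	fd = open("/sys/devices/virtual/misc/test_firmware/trigger_request",
		  O_WRONLY);
	if (fd < 0 || write(fd, name, strlen(name)) < 0) {
		perror("trigger_request");
		return 1;
	}
	close(fd);

	/* Read back whatever the kernel loaded. */
	fd = open("/dev/test_firmware", O_RDONLY);
	if (fd < 0) {
		perror("/dev/test_firmware");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	printf("read %zd firmware bytes\n", n);
	close(fd);
	return 0;
}

Note that a failed load is not reported through the write (trigger_request_store() returns count either way); it only shows up in the kernel log, and the subsequent read returns data only if a firmware blob actually ended up cached in test_firmware.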
diff --git a/lib/textsearch.c b/lib/textsearch.c
index e0cc0146ae62..0c7e9ab2d88f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -159,6 +159,7 @@ errout:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_register);
/**
* textsearch_unregister - unregister a textsearch module
@@ -190,6 +191,7 @@ out:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_unregister);
struct ts_linear_state
{
@@ -236,6 +238,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
return textsearch_find(conf, state);
}
+EXPORT_SYMBOL(textsearch_find_continuous);
/**
* textsearch_prepare - Prepare a search
@@ -298,6 +301,7 @@ errout:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(textsearch_prepare);
/**
* textsearch_destroy - destroy a search configuration
@@ -316,9 +320,4 @@ void textsearch_destroy(struct ts_config *conf)
kfree(conf);
}
-
-EXPORT_SYMBOL(textsearch_register);
-EXPORT_SYMBOL(textsearch_unregister);
-EXPORT_SYMBOL(textsearch_prepare);
-EXPORT_SYMBOL(textsearch_find_continuous);
EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 5e2cf6f342f8..6fe2c84eb055 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -364,7 +364,6 @@ enum format_type {
FORMAT_TYPE_SHORT,
FORMAT_TYPE_UINT,
FORMAT_TYPE_INT,
- FORMAT_TYPE_NRCHARS,
FORMAT_TYPE_SIZE_T,
FORMAT_TYPE_PTRDIFF
};
@@ -1538,10 +1537,6 @@ qualifier:
return fmt - start;
/* skip alnum */
- case 'n':
- spec->type = FORMAT_TYPE_NRCHARS;
- return ++fmt - start;
-
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
return ++fmt - start;
@@ -1564,6 +1559,15 @@ qualifier:
case 'u':
break;
+ case 'n':
+ /*
+ * Since %n poses a greater security risk than utility, treat
+ * it as an invalid format specifier. Warn about its use so
+ * that new instances don't get added.
+ */
+ WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt);
+ /* Fall-through */
+
default:
spec->type = FORMAT_TYPE_INVALID;
return fmt - start;
@@ -1737,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
break;
- case FORMAT_TYPE_NRCHARS: {
- /*
- * Since %n poses a greater security risk than
- * utility, ignore %n and skip its argument.
- */
- void *skip_arg;
-
- WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
- old_fmt);
-
- skip_arg = va_arg(args, void *);
- break;
- }
-
default:
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
@@ -2025,19 +2015,6 @@ do { \
fmt++;
break;
- case FORMAT_TYPE_NRCHARS: {
- /* skip %n 's argument */
- u8 qualifier = spec.qualifier;
- void *skip_arg;
- if (qualifier == 'l')
- skip_arg = va_arg(args, long *);
- else if (_tolower(qualifier) == 'z')
- skip_arg = va_arg(args, size_t *);
- else
- skip_arg = va_arg(args, int *);
- break;
- }
-
default:
switch (spec.type) {
@@ -2196,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
++str;
break;
- case FORMAT_TYPE_NRCHARS:
- /* skip */
- break;
-
default: {
unsigned long long num;
@@ -2374,7 +2347,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
base = 10;
- is_sign = 0;
+ is_sign = false;
switch (*fmt++) {
case 'c':
@@ -2413,7 +2386,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
case 'i':
base = 0;
case 'd':
- is_sign = 1;
+ is_sign = true;
case 'u':
break;
case '%':
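
Net effect of the vsprintf hunks: the dedicated FORMAT_TYPE_NRCHARS plumbing is removed and %n is now caught in format_decode() itself, where it warns once and then falls through to the invalid-specifier path, so the argument-consuming code in vsnprintf()/bstr_printf() never sees it; the vsscanf hunks merely turn is_sign into a bool. A hypothetical kernel-context fragment showing the caller-visible behaviour (assumes the usual <linux/kernel.h>/<linux/printk.h> environment; percent_n_demo() is a made-up name):

/* Illustrative only -- not part of the patch. */
static void percent_n_demo(void)
{
	int written = -1;
	char buf[32];

	/* Fires the WARN_ONCE added in format_decode() the first time;
	 * the specifier is treated as invalid, so nothing is ever
	 * stored through &written.
	 */
	snprintf(buf, sizeof(buf), "abc%n", &written);

	pr_info("written=%d (still -1), buf='%s'\n", written, buf);
}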
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 08837db52d94..12d2d777f36b 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -9,33 +9,33 @@ config XZ_DEC
if XZ_DEC
config XZ_DEC_X86
- bool "x86 BCJ filter decoder"
- default y if X86
+ bool "x86 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_POWERPC
- bool "PowerPC BCJ filter decoder"
- default y if PPC
+ bool "PowerPC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder"
- default y if IA64
+ bool "IA-64 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARM
- bool "ARM BCJ filter decoder"
- default y if ARM
+ bool "ARM BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARMTHUMB
- bool "ARM-Thumb BCJ filter decoder"
- default y if (ARM && ARM_THUMB)
+ bool "ARM-Thumb BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_SPARC
- bool "SPARC BCJ filter decoder"
- default y if SPARC
+ bool "SPARC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
endif
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index a6cdc969ea42..08c3c8049998 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1043,6 +1043,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ /* Fall through */
+
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
return XZ_DATA_ERROR;
@@ -1053,6 +1055,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
+ /* Fall through */
+
case SEQ_LZMA_RUN:
/*
* Set dictionary limit to indicate how much we want