Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 2
-rw-r--r--  lib/Kconfig | 14
-rw-r--r--  lib/Kconfig.debug | 79
-rw-r--r--  lib/Kconfig.kasan | 11
-rw-r--r--  lib/Kconfig.ubsan | 11
-rw-r--r--  lib/Makefile | 23
-rw-r--r--  lib/atomic64.c | 14
-rw-r--r--  lib/bch.c | 32
-rw-r--r--  lib/bitmap.c | 51
-rw-r--r--  lib/bucket_locks.c | 11
-rw-r--r--  lib/chacha20.c | 6
-rw-r--r--  lib/cpumask.c | 4
-rw-r--r--  lib/crc-t10dif.c | 57
-rw-r--r--  lib/crc32.c | 22
-rw-r--r--  lib/crc32defs.h | 14
-rw-r--r--  lib/crc64.c | 56
-rw-r--r--  lib/debug_locks.c | 6
-rw-r--r--  lib/debugobjects.c | 10
-rw-r--r--  lib/decompress_bunzip2.c | 3
-rw-r--r--  lib/devres.c | 36
-rw-r--r--  lib/fonts/font_7x14.c | 256
-rw-r--r--  lib/fonts/font_8x16.c | 256
-rw-r--r--  lib/fonts/font_8x8.c | 256
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 256
-rw-r--r--  lib/gen_crc32table.c | 5
-rw-r--r--  lib/gen_crc64table.c | 68
-rw-r--r--  lib/idr.c | 462
-rw-r--r--  lib/ioremap.c | 4
-rw-r--r--  lib/iov_iter.c | 200
-rw-r--r--  lib/klist.c | 10
-rw-r--r--  lib/kobject.c | 30
-rw-r--r--  lib/kstrtox.c | 16
-rw-r--r--  lib/locking-selftest.c | 2
-rw-r--r--  lib/lz4/lz4_decompress.c | 481
-rw-r--r--  lib/lz4/lz4defs.h | 9
-rw-r--r--  lib/memcat_p.c | 34
-rw-r--r--  lib/mpi/mpi-pow.c | 3
-rw-r--r--  lib/nlattr.c | 269
-rw-r--r--  lib/nmi_backtrace.c | 3
-rw-r--r--  lib/parser.c | 16
-rw-r--r--  lib/percpu-refcount.c | 28
-rw-r--r--  lib/percpu_counter.c | 1
-rw-r--r--  lib/percpu_ida.c | 370
-rw-r--r--  lib/radix-tree.c | 845
-rw-r--r--  lib/raid6/s390vx.uc | 34
-rw-r--r--  lib/reciprocal_div.c | 41
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 2
-rw-r--r--  lib/refcount.c | 55
-rw-r--r--  lib/rhashtable.c | 78
-rw-r--r--  lib/sg_pool.c | 7
-rw-r--r--  lib/string.c | 1
-rw-r--r--  lib/test_bitfield.c | 168
-rw-r--r--  lib/test_bpf.c | 1
-rw-r--r--  lib/test_debug_virtual.c | 2
-rw-r--r--  lib/test_hexdump.c | 28
-rw-r--r--  lib/test_ida.c | 177
-rw-r--r--  lib/test_kasan.c | 70
-rw-r--r--  lib/test_memcat_p.c | 115
-rw-r--r--  lib/test_overflow.c | 198
-rw-r--r--  lib/test_printf.c | 24
-rw-r--r--  lib/test_rhashtable.c | 8
-rw-r--r--  lib/test_xarray.c | 1238
-rw-r--r--  lib/vsprintf.c | 221
-rw-r--r--  lib/xarray.c | 2036
-rw-r--r--  lib/xz/xz_crc32.c | 2
-rw-r--r--  lib/xz/xz_private.h | 4
-rw-r--r--  lib/zlib_inflate/inflate.c | 12
67 files changed, 6371 insertions, 2493 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index 09aae85418ab..f2a39c9e5485 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -2,5 +2,7 @@
# Generated files
#
gen_crc32table
+gen_crc64table
crc32table.h
+crc64table.h
oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index 706836ec314d..a9965f4af4dd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -170,6 +170,14 @@ config CRC32_BIT
endchoice
+config CRC64
+ tristate "CRC64 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC64 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC64
+ functions require M here.
+
config CRC4
tristate "CRC4 functions"
help
@@ -223,7 +231,6 @@ config AUDIT_COMPAT_GENERIC
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
- default n
help
This option enables the 32 bit PRNG library functions to perform a
self test on initialization.
@@ -392,8 +399,11 @@ config INTERVAL_TREE
for more information.
-config RADIX_TREE_MULTIORDER
+config XARRAY_MULTI
bool
+ help
+ Support entries which occupy multiple consecutive indices in the
+ XArray.
config ASSOCIATIVE_ARRAY
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8838d1158d19..1af29b8224fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,3 +1,5 @@
+menu "Kernel hacking"
+
menu "printk and dmesg options"
config PRINTK_TIME
@@ -30,6 +32,17 @@ config CONSOLE_LOGLEVEL_DEFAULT
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.
+config CONSOLE_LOGLEVEL_QUIET
+ int "quiet console loglevel (1-15)"
+ range 1 15
+ default "4"
+ help
+ loglevel to use when "quiet" is passed on the kernel commandline.
+
+ When "quiet" is passed on the kernel commandline this loglevel
+ will be used as the loglevel. IOW passing "quiet" will be the
+ equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
+
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
@@ -198,14 +211,6 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
-config ENABLE_WARN_DEPRECATED
- bool "Enable __deprecated logic"
- default y
- help
- Enable the __deprecated logic in the kernel build.
- Disable this to suppress the "warning: 'foo' is deprecated
- (declared at kernel/power/somefile.c:1234)" messages.
-
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
@@ -1174,7 +1179,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
@@ -1193,6 +1198,7 @@ config DEBUG_ATOMIC_SLEEP
bool "Sleep inside atomic section checking"
select PREEMPT_COUNT
depends on DEBUG_KERNEL
+ depends on !ARCH_NO_PREEMPT
help
If you say Y here, various routines which may sleep will become very
noisy if they are called inside atomic sections: when a spinlock is
@@ -1214,7 +1220,6 @@ config LOCK_TORTURE_TEST
tristate "torture tests for locking"
depends on DEBUG_KERNEL
select TORTURE_TEST
- default n
help
This option provides a kernel module that runs torture tests
on kernel locking primitives. The kernel module may be built
@@ -1272,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
time. This is really bad from a security perspective, and
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
- However, since users can not do anything actionble to
+ However, since users cannot do anything actionable to
address this, by default the kernel will issue only a single
warning for the first use of unseeded randomness.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
- those developers interersted in improving the security of
+ those developers interested in improving the security of
Linux kernels running on their architecture (or
subarchitecture).
@@ -1287,7 +1292,7 @@ config DEBUG_KOBJECT
depends on DEBUG_KERNEL
help
If you say Y here, some extra kobject debugging messages will be sent
- to the syslog.
+ to the syslog.
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
@@ -1585,7 +1590,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
help
Provide stacktrace filter for fault-injection capabilities
@@ -1594,7 +1599,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -1681,7 +1686,6 @@ config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
depends on BLOCK
- default n
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
@@ -1715,10 +1719,9 @@ config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
- default n
help
This option provides for testing basic kprobes functionality on
- boot. A sample kprobe, jprobe and kretprobe are inserted and
+ boot. Samples of kprobe and kretprobe are inserted and
verified for functionality.
Say N if you are unsure.
@@ -1726,7 +1729,6 @@ config KPROBES_SANITY_TEST
config BACKTRACE_SELF_TEST
tristate "Self test for the backtrace code"
depends on DEBUG_KERNEL
- default n
help
This option provides a kernel module that can be used to test
the kernel stack backtrace code. This option is not useful
@@ -1796,21 +1798,29 @@ config TEST_PRINTF
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
- default n
help
Enable this option to test the bitmap functions at boot.
If unsure, say N.
+config TEST_BITFIELD
+ tristate "Test bitfield functions at runtime"
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ If unsure, say N.
+
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
+config TEST_XARRAY
+ tristate "Test the XArray code at runtime"
+
config TEST_OVERFLOW
tristate "Test check_*_overflow() functions at runtime"
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
- default n
help
Enable this option to test the rhashtable functions at boot.
@@ -1818,7 +1828,6 @@ config TEST_RHASHTABLE
config TEST_HASH
tristate "Perform selftest on hash functions"
- default n
help
Enable this option to test the kernel's integer (<linux/hash.h>),
string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
@@ -1827,9 +1836,11 @@ config TEST_HASH
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config TEST_IDA
+ tristate "Perform selftest on IDA functions"
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
- default n
depends on PARMAN
help
Enable this option to test priority array manager on boot
@@ -1839,7 +1850,6 @@ config TEST_PARMAN
config TEST_LKM
tristate "Test module loading with 'hello world' module"
- default n
depends on m
help
This builds the "test_module" module that emits "Hello, world"
@@ -1853,7 +1863,6 @@ config TEST_LKM
config TEST_USER_COPY
tristate "Test user/kernel boundary protections"
- default n
depends on m
help
This builds the "test_user_copy" module that runs sanity checks
@@ -1866,7 +1875,6 @@ config TEST_USER_COPY
config TEST_BPF
tristate "Test BPF filter functionality"
- default n
depends on m && NET
help
This builds the "test_bpf" module that runs various test vectors
@@ -1880,7 +1888,6 @@ config TEST_BPF
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
- default n
help
This builds the "test_find_bit" module that measure find_*_bit()
functions performance.
@@ -1889,7 +1896,6 @@ config FIND_BIT_BENCHMARK
config TEST_FIRMWARE
tristate "Test firmware loading via userspace interface"
- default n
depends on FW_LOADER
help
This builds the "test_firmware" module that creates a userspace
@@ -1902,7 +1908,6 @@ config TEST_FIRMWARE
config TEST_SYSCTL
tristate "sysctl test driver"
- default n
depends on PROC_SYSCTL
help
This builds the "test_sysctl" module. This driver enables to test the
@@ -1913,7 +1918,6 @@ config TEST_SYSCTL
config TEST_UDELAY
tristate "udelay test driver"
- default n
help
This builds the "udelay_test" module that helps to make sure
that udelay() is working properly.
@@ -1922,7 +1926,6 @@ config TEST_UDELAY
config TEST_STATIC_KEYS
tristate "Test static keys"
- default n
depends on m
help
Test the static key interfaces.
@@ -1931,7 +1934,6 @@ config TEST_STATIC_KEYS
config TEST_KMOD
tristate "kmod stress tester"
- default n
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
@@ -1966,11 +1968,18 @@ config TEST_DEBUG_VIRTUAL
If unsure, say N.
+config TEST_MEMCAT_P
+ tristate "Test memcat_p() helper function"
+ help
+ Test the memcat_p() helper for correctly merging two
+ pointer arrays together.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
- depends on HAVE_MEMBLOCK
---help---
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
@@ -2034,3 +2043,7 @@ config IO_STRICT_DEVMEM
if the driver using a given range cannot be disabled.
If in doubt, say Y.
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index c253c1b46c6b..d0bad1bd9a2b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
config KASAN
bool "KASan: runtime memory debugger"
- depends on SLUB || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
@@ -57,6 +57,15 @@ config KASAN_INLINE
endchoice
+config KASAN_S390_4_LEVEL_PAGING
+ bool "KASan: use 4-level paging"
+ depends on KASAN && S390
+ help
+ Compiling the kernel with KASan disables automatic 3-level vs
+ 4-level paging selection. 3-level paging is used by default (up
+ to 3TB of RAM with KASan enabled). This options allows to force
+ 4-level paging instead.
+
config TEST_KASAN
tristate "Module for testing kasan for bug detection"
depends on m && KASAN
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 19d42ea75ec2..98fa559ebd80 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config ARCH_WANTS_UBSAN_NO_NULL
- def_bool n
-
config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_NULL
- bool "Enable checking of null pointers"
- depends on UBSAN
- default y if !ARCH_WANTS_UBSAN_NO_NULL
- help
- This option enables detection of memory accesses via a
- null pointer.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m && UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index 90dc5520b784..db06d1237898 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,13 +18,13 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o timerqueue.o\
+ rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o
+ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
@@ -50,9 +50,12 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
+CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
@@ -65,10 +68,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
+obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -102,6 +108,7 @@ obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC64) += crc64.o
obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
@@ -215,7 +222,9 @@ obj-$(CONFIG_FONT_SUPPORT) += fonts/
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
hostprogs-y := gen_crc32table
+hostprogs-y += gen_crc64table
clean-files := crc32table.h
+clean-files += crc64table.h
$(obj)/crc32.o: $(obj)/crc32table.h
@@ -225,6 +234,14 @@ quiet_cmd_crc32 = GEN $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)
+$(obj)/crc64.o: $(obj)/crc64table.h
+
+quiet_cmd_crc64 = GEN $@
+ cmd_crc64 = $< > $@
+
+$(obj)/crc64table.h: $(obj)/gen_crc64table
+ $(call cmd,crc64)
+
#
# Build a fast OID lookip registry from include/linux/oid_registry.h
#
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ long long val;
raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
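For context: the boolean-returning atomic64_add_unless() does not go away with this change; in the generic atomic headers it is presumably layered on top of the new fetch variant, roughly like this sketch (not part of this patch):

static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	/* the add happened unless the old value was already 'u' */
	return atomic64_fetch_add_unless(v, a, u) != u;
}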
diff --git a/lib/bch.c b/lib/bch.c
index bc89dfe4d1b3..5db6d3a4c8a6 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -78,15 +78,21 @@
#define GF_M(_p) (CONFIG_BCH_CONST_M)
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
+#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
#endif
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
+#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
+
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
#endif
@@ -187,18 +193,22 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const unsigned int l = BCH_ECC_WORDS(bch)-1;
unsigned int i, mlen;
unsigned long m;
- uint32_t w, r[l+1];
+ uint32_t w, r[BCH_ECC_MAX_WORDS];
+ const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
const uint32_t * const tab0 = bch->mod8_tab;
const uint32_t * const tab1 = tab0 + 256*(l+1);
const uint32_t * const tab2 = tab1 + 256*(l+1);
const uint32_t * const tab3 = tab2 + 256*(l+1);
const uint32_t *pdata, *p0, *p1, *p2, *p3;
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
if (ecc) {
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
} else {
- memset(bch->ecc_buf, 0, sizeof(r));
+ memset(bch->ecc_buf, 0, r_bytes);
}
/* process first unaligned data bytes */
@@ -215,7 +225,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
mlen = len/4;
data += 4*mlen;
len -= 4*mlen;
- memcpy(r, bch->ecc_buf, sizeof(r));
+ memcpy(r, bch->ecc_buf, r_bytes);
/*
* split each 32-bit word into 4 polynomials of weight 8 as follows:
@@ -241,7 +251,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
r[l] = p0[l]^p1[l]^p2[l]^p3[l];
}
- memcpy(bch->ecc_buf, r, sizeof(r));
+ memcpy(bch->ecc_buf, r, r_bytes);
/* process last unaligned bytes */
if (len)
@@ -434,7 +444,7 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
{
const int m = GF_M(bch);
unsigned int tmp, mask;
- int rem, c, r, p, k, param[m];
+ int rem, c, r, p, k, param[BCH_MAX_M];
k = 0;
mask = 1 << m;
@@ -1114,7 +1124,7 @@ static int build_deg2_base(struct bch_control *bch)
{
const int m = GF_M(bch);
int i, j, r;
- unsigned int sum, x, y, remaining, ak = 0, xi[m];
+ unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];
/* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
for (i = 0; i < m; i++) {
@@ -1254,7 +1264,6 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
struct bch_control *bch = NULL;
const int min_m = 5;
- const int max_m = 15;
/* default primitive polynomials */
static const unsigned int prim_poly_tab[] = {
@@ -1270,7 +1279,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
goto fail;
}
#endif
- if ((m < min_m) || (m > max_m))
+ if ((m < min_m) || (m > BCH_MAX_M))
/*
* values of m greater than 15 are not currently supported;
* supporting m > 15 would require changing table base type
@@ -1278,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
*/
goto fail;
+ if (t > BCH_MAX_T)
+ /*
+ * we can support larger than 64 bits if necessary, at the
+ * cost of higher stack usage.
+ */
+ goto fail;
+
/* sanity checks */
if ((t < 1) || (m*t >= ((1 << m)-1)))
/* invalid t value */
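A quick bound on the stack cost of the fixed-size arrays that replace the VLAs above: BCH_ECC_MAX_WORDS = DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32) = DIV_ROUND_UP(15 * 64, 32) = 30, so r[] in encode_bch() occupies at most 30 * sizeof(uint32_t) = 120 bytes, and param[] / xi[] hold at most BCH_MAX_M = 15 entries each.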
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 58f9750e49c6..eead55aa7170 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,8 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -35,11 +37,6 @@
* carefully filter out these unused bits from impacting their
* results.
*
- * These operations actually hold to a slightly stronger rule:
- * if you don't input any bitmaps to these ops that have some
- * unused bits set, then they won't output any set unused bits
- * in output bitmaps.
- *
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -465,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
- * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
- * sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output.
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
- if (len > 1)
- n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
- return n;
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
@@ -1125,6 +1120,25 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
EXPORT_SYMBOL(bitmap_copy_le);
#endif
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+{
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
+}
+EXPORT_SYMBOL(bitmap_alloc);
+
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+{
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(bitmap_zalloc);
+
+void bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+EXPORT_SYMBOL(bitmap_free);
+
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
@@ -1132,14 +1146,10 @@ EXPORT_SYMBOL(bitmap_copy_le);
* @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
-void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
- unsigned int nbits)
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
bitmap[i/2] = (unsigned long) buf[i];
@@ -1163,9 +1173,6 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
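A minimal usage sketch for the bitmap_zalloc()/bitmap_free() helpers added above (hypothetical caller, not taken from this patch):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_track_slots(void)
{
	unsigned long *slots;

	slots = bitmap_zalloc(128, GFP_KERNEL);	/* 128 bits, zero-filled */
	if (!slots)
		return -ENOMEM;

	set_bit(3, slots);			/* mark hypothetical slot 3 busy */
	/* ... */
	bitmap_free(slots);
	return 0;
}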
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index ade3ce6c4af6..64b92e1dbace 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -11,8 +11,9 @@
* to a power of 2 to be suitable as a hash table.
*/
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
- size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+ const char *name, struct lock_class_key *key)
{
spinlock_t *tlocks = NULL;
unsigned int i, size;
@@ -33,8 +34,10 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
spin_lock_init(&tlocks[i]);
+ lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+ }
}
*locks = tlocks;
@@ -42,7 +45,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
return 0;
}
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
void free_bucket_spinlocks(spinlock_t *locks)
{
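Callers keep using the alloc_bucket_spinlocks() name; it presumably becomes a wrapper macro in the corresponding header (not shown in this hunk) that supplies a per-call-site lockdep class, along these lines:

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)     \
	({								      \
		static struct lock_class_key key;			      \
		int ret;						      \
									      \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,    \
					       cpu_mult, gfp, #locks, &key);  \
		ret;							      \
	})

Giving each bucket-lock array its own lock class is what the lockdep_init_map() call added above relies on.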
diff --git a/lib/chacha20.c b/lib/chacha20.c
index c1cc50fb68c9..d907fec6a9ed 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,9 +16,9 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-void chacha20_block(u32 *state, u32 *stream)
+void chacha20_block(u32 *state, u8 *stream)
{
- u32 x[16], *out = stream;
+ u32 x[16];
int i;
for (i = 0; i < ARRAY_SIZE(x); i++)
@@ -67,7 +67,7 @@ void chacha20_block(u32 *state, u32 *stream)
}
for (i = 0; i < ARRAY_SIZE(x); i++)
- out[i] = cpu_to_le32(x[i] + state[i]);
+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
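With the cpu_to_le32() stores replaced by put_unaligned_le32(), the keystream buffer no longer has to be u32-aligned; a caller can now pass a plain byte array, e.g. (sketch assuming the CHACHA20_BLOCK_SIZE constant, 64, from <crypto/chacha20.h>):

	u8 stream[CHACHA20_BLOCK_SIZE];	/* no 32-bit alignment required */

	chacha20_block(state, stream);	/* state is the usual u32[16] ChaCha20 state */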
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_virt_alloc(cpumask_size(), 0);
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
}
/**
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include <linux/static_key.h>
+#include <linux/notifier.h>
-static struct crypto_shash *crct10dif_tfm;
+static struct crypto_shash __rcu *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_MUTEX(crc_t10dif_mutex);
+
+static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+ struct crypto_shash *new, *old;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ static_key_false(&crct10dif_fallback) ||
+ strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
+ return 0;
+
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ if (!old) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ new = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ synchronize_rcu();
+ crypto_free_shash(old);
+ return 0;
+}
+
+static struct notifier_block crc_t10dif_nb = {
+ .notifier_call = crc_t10dif_rehash,
+};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
if (static_key_false(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
- desc.shash.tfm = crct10dif_tfm;
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crct10dif_tfm);
desc.shash.flags = 0;
*(__u16 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
BUG_ON(err);
return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ crypto_register_notifier(&crc_t10dif_nb);
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_fini(void)
{
+ crypto_unregister_notifier(&crc_t10dif_nb);
crypto_free_shash(crct10dif_tfm);
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
+static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ if (static_key_false(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ return sprintf(buffer, "%s\n",
+ crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+}
+
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe84b69..45b1d67a1767 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -182,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
}
#if CRC_LE_BITS == 1
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
}
#else
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
@@ -205,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
+u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
@@ -268,7 +272,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
- return crc32_generic_shift(crc, len, CRCPOLY_LE);
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +334,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a28a750..0c8fb5923e7e 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
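The polynomial constants deleted from crc32defs.h move to a shared header used by the new CRC32_POLY_* references elsewhere in this diff; based on the removed definitions, include/linux/crc32poly.h presumably reduces to the following (reconstructed sketch, guard name assumed):

#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H

/* The standard CRC-32 polynomial, first popularized by Ethernet. */
#define CRC32_POLY_LE 0xedb88320
#define CRC32_POLY_BE 0x04c11db7

/* The CRC32c polynomial, as outlined by Castagnoli. */
#define CRC32C_POLY_LE 0x82F63B78

#endif /* _LINUX_CRC32_POLY_H */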
diff --git a/lib/crc64.c b/lib/crc64.c
new file mode 100644
index 000000000000..0ef8ae6ac047
--- /dev/null
+++ b/lib/crc64.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Normal 64-bit CRC calculation.
+ *
+ * This is a basic crc64 implementation following ECMA-182 specification,
+ * which can be found from,
+ * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ *
+ * Dr. Ross N. Williams has a great document to introduce the idea of CRC
+ * algorithm, here the CRC64 code is also inspired by the table-driven
+ * algorithm and detail example from this paper. This paper can be found
+ * from,
+ * http://www.ross.net/crc/download/crc_v3.txt
+ *
+ * crc64table[256] is the lookup table of a table-driven 64-bit CRC
+ * calculation, which is generated by gen_crc64table.c in kernel build
+ * time. The polynomial of crc64 arithmetic is from ECMA-182 specification
+ * as well, which is defined as,
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "crc64table.h"
+
+MODULE_DESCRIPTION("CRC64 calculations");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+{
+ size_t i, t;
+
+ const unsigned char *_p = p;
+
+ for (i = 0; i < len; i++) {
+ t = ((crc >> 56) ^ (*_p++)) & 0xFF;
+ crc = crc64table[t] ^ (crc << 8);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL_GPL(crc64_be);
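The ECMA-182 polynomial spelled out in the comment corresponds to the 64-bit constant 0x42F0E1EBA9EA3693 (MSB-first, the x^64 term implicit). The crc64table[] consumed above is produced at build time by gen_crc64table; a self-contained host-side sketch of that generation (constant name and loop shape assumed, not copied from gen_crc64table.c):

#include <stdint.h>

#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL

static uint64_t crc64_table[256];

static void generate_crc64_table(void)
{
	uint64_t i, j, c, crc;

	for (i = 0; i < 256; i++) {
		crc = 0;
		c = i << 56;			/* feed the byte in at the top */
		for (j = 0; j < 8; j++) {
			if ((crc ^ c) & 0x8000000000000000ULL)
				crc = (crc << 1) ^ CRC64_ECMA182_POLY;
			else
				crc <<= 1;
			c <<= 1;
		}
		crc64_table[i] = crc;		/* table entry for byte value i */
	}
}

Typical use of the exported function is then simply crc = crc64_be(0, buf, len), feeding the previous result back in as the seed for incremental checksums, as the kernel-doc above notes.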
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e..ce51749cc145 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -21,7 +21,7 @@
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
-int debug_locks = 1;
+int debug_locks __read_mostly = 1;
EXPORT_SYMBOL_GPL(debug_locks);
/*
@@ -29,7 +29,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
-int debug_locks_silent;
+int debug_locks_silent __read_mostly;
EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
*/
int debug_locks_off(void)
{
- if (__debug_locks_off()) {
+ if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
console_verbose();
return 1;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 994be4805cec..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- if (obj_cache)
- kmem_cache_destroy(obj_cache);
+ kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361b24b8..7c4932eed748 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
- c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
diff --git a/lib/devres.c b/lib/devres.c
index 5bec1120b392..faccf1a037d0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -4,6 +4,7 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/of_address.h>
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
@@ -162,6 +163,41 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
+/*
+ * devm_of_iomap - Requests a resource and maps the memory mapped IO
+ * for a given device_node managed by a given device
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach of the device.
+ *
+ * This is to be used when a device requests/maps resources described
+ * by other device tree nodes (children or otherwise).
+ *
+ * @dev: The device "managing" the resource
+ * @node: The device-tree node where the resource resides
+ * @index: index of the MMIO range in the "reg" property
+ * @size: Returns the size of the resource (pass NULL if not needed)
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ * base = devm_of_iomap(&pdev->dev, node, 0, NULL);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ struct resource res;
+
+ if (of_address_to_resource(node, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+ if (size)
+ *size = resource_size(&res);
+ return devm_ioremap_resource(dev, &res);
+}
+EXPORT_SYMBOL(devm_of_iomap);
+
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 9ae5b62c8a0d..89752d0b23e8 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -2058,7 +2058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2074,7 +2074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x70, /* 0111000 */
0x00, /* 0000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2090,7 +2090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 0000110 */
0x18, /* 0001100 */
0x30, /* 0011000 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2122,7 +2122,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2138,7 +2138,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2154,7 +2154,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x38, /* 0011100 */
@@ -2170,7 +2170,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0xe0, /* 1110000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2202,7 +2202,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2218,7 +2218,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2234,7 +2234,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 0000000 */
0x6c, /* 0110110 */
0x00, /* 0000000 */
@@ -2250,7 +2250,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2282,7 +2282,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2298,7 +2298,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x30, /* 0011000 */
0x48, /* 0100100 */
0x48, /* 0100100 */
@@ -2314,7 +2314,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0xfc, /* 1111110 */
@@ -2330,7 +2330,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 0000000 */
0x3e, /* 0011111 */
0x6c, /* 0110110 */
@@ -2362,7 +2362,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2378,7 +2378,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2394,7 +2394,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2410,7 +2410,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2458,7 +2458,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x18, /* 0001100 */
0x70, /* 0111000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -2474,7 +2474,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0xcc, /* 1100110 */
@@ -2490,7 +2490,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x7c, /* 0111110 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x64, /* 0110010 */
@@ -2522,7 +2522,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2538,7 +2538,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 1111100 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2554,7 +2554,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x1c, /* 0001110 */
0x36, /* 0011011 */
0x30, /* 0011000 */
@@ -2570,7 +2570,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2602,7 +2602,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2618,7 +2618,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2634,7 +2634,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 0000000 */
0x76, /* 0111011 */
0xdc, /* 1101110 */
@@ -2650,7 +2650,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 0111011 */
0xdc, /* 1101110 */
0x00, /* 0000000 */
@@ -2666,7 +2666,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xd8, /* 1101100 */
@@ -2682,7 +2682,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 0000000 */
0x70, /* 0111000 */
0xd8, /* 1101100 */
@@ -2698,7 +2698,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2714,7 +2714,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2746,7 +2746,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2762,7 +2762,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x7c, /* 0111110 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2778,7 +2778,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x0c, /* 0000110 */
0x00, /* 0000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2794,7 +2794,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2810,7 +2810,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2826,7 +2826,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x88, /* 1000100 */
0x22, /* 0010001 */
0x88, /* 1000100 */
@@ -2842,7 +2842,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x88, /* 1000100 */
0x22, /* 0010001 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x54, /* 0101010 */
0xaa, /* 1010101 */
0x54, /* 0101010 */
@@ -2858,7 +2858,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x54, /* 0101010 */
0xaa, /* 1010101 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xee, /* 1110111 */
0xba, /* 1011101 */
0xee, /* 1110111 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xee, /* 1110111 */
0xba, /* 1011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2890,7 +2890,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2906,7 +2906,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2922,7 +2922,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2938,7 +2938,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2954,7 +2954,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2970,7 +2970,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2986,7 +2986,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3002,7 +3002,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3034,7 +3034,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3050,7 +3050,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3066,7 +3066,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3082,7 +3082,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3098,7 +3098,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3114,7 +3114,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3130,7 +3130,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3146,7 +3146,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3178,7 +3178,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3194,7 +3194,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3210,7 +3210,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3226,7 +3226,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3242,7 +3242,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3258,7 +3258,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3274,7 +3274,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3290,7 +3290,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3322,7 +3322,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3338,7 +3338,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3354,7 +3354,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3370,7 +3370,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3386,7 +3386,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3402,7 +3402,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3418,7 +3418,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3434,7 +3434,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3466,7 +3466,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3482,7 +3482,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3498,7 +3498,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3514,7 +3514,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3530,7 +3530,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3546,7 +3546,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
@@ -3562,7 +3562,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xe0, /* 1110000 */
0xe0, /* 1110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
@@ -3578,7 +3578,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x1e, /* 0001111 */
0x1e, /* 0001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3610,7 +3610,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -3626,7 +3626,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3642,7 +3642,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0xfe, /* 1111111 */
@@ -3658,7 +3658,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3674,7 +3674,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3690,7 +3690,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3706,7 +3706,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xc0, /* 1100000 */
0x80, /* 1000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3722,7 +3722,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0x30, /* 0011000 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3754,7 +3754,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3770,7 +3770,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 0000000 */
0x3c, /* 0011110 */
0x60, /* 0110000 */
@@ -3786,7 +3786,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3802,7 +3802,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x06, /* 0000011 */
@@ -3818,7 +3818,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x30, /* 0011000 */
@@ -3834,7 +3834,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -3850,7 +3850,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3866,7 +3866,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x60, /* 0110000 */
@@ -3898,7 +3898,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x18, /* 0001100 */
@@ -3914,7 +3914,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x36, /* 0011011 */
@@ -3930,7 +3930,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 0001100 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -3946,7 +3946,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3962,7 +3962,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3978,7 +3978,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3994,7 +3994,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4010,7 +4010,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x1e, /* 0001111 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -4042,7 +4042,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0xd8, /* 1101100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -4058,7 +4058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 0111100 */
0xcc, /* 1100110 */
0x18, /* 0001100 */
@@ -4074,7 +4074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4090,7 +4090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index 34292cdfaa23..b7ab1f5fbdb8 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2334,7 +2334,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2352,7 +2352,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2370,7 +2370,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2388,7 +2388,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2424,7 +2424,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2460,7 +2460,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2478,7 +2478,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2514,7 +2514,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2532,7 +2532,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2550,7 +2550,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2568,7 +2568,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x38, /* 00111000 */
@@ -2604,7 +2604,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2622,7 +2622,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2640,7 +2640,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3e, /* 00111110 */
@@ -2658,7 +2658,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2676,7 +2676,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2694,7 +2694,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2712,7 +2712,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x00, /* 00000000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2748,7 +2748,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2766,7 +2766,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x78, /* 01111000 */
0x00, /* 00000000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2784,7 +2784,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2802,7 +2802,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2820,7 +2820,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2838,7 +2838,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2856,7 +2856,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0x00, /* 00000000 */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x00, /* 00000000 */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
@@ -2892,7 +2892,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2910,7 +2910,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2928,7 +2928,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2946,7 +2946,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2964,7 +2964,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2982,7 +2982,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -3000,7 +3000,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -3036,7 +3036,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x30, /* 00110000 */
@@ -3054,7 +3054,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3072,7 +3072,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3090,7 +3090,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3108,7 +3108,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3126,7 +3126,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -3144,7 +3144,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3180,7 +3180,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x11, /* 00010001 */
0x44, /* 01000100 */
0x11, /* 00010001 */
@@ -3198,7 +3198,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x11, /* 00010001 */
0x44, /* 01000100 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -3216,7 +3216,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xdd, /* 11011101 */
0x77, /* 01110111 */
0xdd, /* 11011101 */
@@ -3234,7 +3234,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xdd, /* 11011101 */
0x77, /* 01110111 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3252,7 +3252,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3270,7 +3270,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3288,7 +3288,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3324,7 +3324,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3342,7 +3342,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3360,7 +3360,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3378,7 +3378,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3396,7 +3396,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3414,7 +3414,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3432,7 +3432,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3468,7 +3468,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3486,7 +3486,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3504,7 +3504,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3522,7 +3522,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3540,7 +3540,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3558,7 +3558,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3576,7 +3576,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3612,7 +3612,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3630,7 +3630,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3648,7 +3648,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3666,7 +3666,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3684,7 +3684,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3702,7 +3702,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3720,7 +3720,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3756,7 +3756,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3774,7 +3774,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3792,7 +3792,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3810,7 +3810,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3828,7 +3828,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3846,7 +3846,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3864,7 +3864,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3900,7 +3900,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3918,7 +3918,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3936,7 +3936,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3954,7 +3954,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -3972,7 +3972,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3990,7 +3990,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -4008,7 +4008,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -4044,7 +4044,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4062,7 +4062,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -4080,7 +4080,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4098,7 +4098,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4116,7 +4116,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4134,7 +4134,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4152,7 +4152,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4170,7 +4170,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4188,7 +4188,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -4206,7 +4206,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4224,7 +4224,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4242,7 +4242,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1e, /* 00011110 */
@@ -4260,7 +4260,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4278,7 +4278,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4296,7 +4296,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1c, /* 00011100 */
@@ -4314,7 +4314,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4332,7 +4332,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4350,7 +4350,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4368,7 +4368,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4386,7 +4386,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4404,7 +4404,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x0e, /* 00001110 */
@@ -4422,7 +4422,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -4440,7 +4440,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4458,7 +4458,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4476,7 +4476,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -4494,7 +4494,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4512,7 +4512,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4530,7 +4530,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x00, /* 00000000 */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
@@ -4548,7 +4548,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x00, /* 00000000 */
0x6c, /* 01101100 */
0x36, /* 00110110 */
@@ -4566,7 +4566,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x00, /* 00000000 */
0x3c, /* 00111100 */
0x66, /* 01100110 */
@@ -4584,7 +4584,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4602,7 +4602,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 751becf3c521..2328ebc8bab5 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -1291,7 +1291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1301,7 +1301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1311,7 +1311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1321,7 +1321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1331,7 +1331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1341,7 +1341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1351,7 +1351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1361,7 +1361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1371,7 +1371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1381,7 +1381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1391,7 +1391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1401,7 +1401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1411,7 +1411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1421,7 +1421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1431,7 +1431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1441,7 +1441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1451,7 +1451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1461,7 +1461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1471,7 +1471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1481,7 +1481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1491,7 +1491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1501,7 +1501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1511,7 +1511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1521,7 +1521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1531,7 +1531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1541,7 +1541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1551,7 +1551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1561,7 +1561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1571,7 +1571,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1581,7 +1581,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1591,7 +1591,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1601,7 +1601,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1611,7 +1611,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1621,7 +1621,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1631,7 +1631,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1641,7 +1641,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1651,7 +1651,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1661,7 +1661,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1671,7 +1671,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1681,7 +1681,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1691,7 +1691,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1701,7 +1701,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1711,7 +1711,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1721,7 +1721,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1731,7 +1731,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1741,7 +1741,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1751,7 +1751,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1761,7 +1761,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1771,7 +1771,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1781,7 +1781,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1791,7 +1791,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1801,7 +1801,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1811,7 +1811,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1821,7 +1821,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1831,7 +1831,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1841,7 +1841,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1851,7 +1851,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1861,7 +1861,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1871,7 +1871,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1881,7 +1881,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1891,7 +1891,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1901,7 +1901,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1911,7 +1911,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1921,7 +1921,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1931,7 +1931,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1941,7 +1941,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1951,7 +1951,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1961,7 +1961,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1971,7 +1971,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1981,7 +1981,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1991,7 +1991,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2001,7 +2001,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2011,7 +2011,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2021,7 +2021,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2031,7 +2031,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2041,7 +2041,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2051,7 +2051,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2061,7 +2061,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2071,7 +2071,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2081,7 +2081,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2091,7 +2091,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2101,7 +2101,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2111,7 +2111,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2121,7 +2121,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2131,7 +2131,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2141,7 +2141,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2151,7 +2151,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2161,7 +2161,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2171,7 +2171,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2181,7 +2181,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2191,7 +2191,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2201,7 +2201,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2211,7 +2211,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2221,7 +2221,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2231,7 +2231,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2241,7 +2241,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2251,7 +2251,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2261,7 +2261,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2271,7 +2271,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2281,7 +2281,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2291,7 +2291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2301,7 +2301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2311,7 +2311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2321,7 +2321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2331,7 +2331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2341,7 +2341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2351,7 +2351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2361,7 +2361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2371,7 +2371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2381,7 +2381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2391,7 +2391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2401,7 +2401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2411,7 +2411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2421,7 +2421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2431,7 +2431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2441,7 +2441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2451,7 +2451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2461,7 +2461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2471,7 +2471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2481,7 +2481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2491,7 +2491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2501,7 +2501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2511,7 +2511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2521,7 +2521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2531,7 +2531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2541,7 +2541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2551,7 +2551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2561,7 +2561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b0514c0a7445..b15d3c342c5b 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -1296,7 +1296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1306,7 +1306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1316,7 +1316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1326,7 +1326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1336,7 +1336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1346,7 +1346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1356,7 +1356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1366,7 +1366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1376,7 +1376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1386,7 +1386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1396,7 +1396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1406,7 +1406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1416,7 +1416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1426,7 +1426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1436,7 +1436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1446,7 +1446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1456,7 +1456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1466,7 +1466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1476,7 +1476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1486,7 +1486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1496,7 +1496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1506,7 +1506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1516,7 +1516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1526,7 +1526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1536,7 +1536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1546,7 +1546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1556,7 +1556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1566,7 +1566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1576,7 +1576,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1586,7 +1586,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1596,7 +1596,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1606,7 +1606,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1616,7 +1616,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1626,7 +1626,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1636,7 +1636,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1646,7 +1646,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1656,7 +1656,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1666,7 +1666,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1676,7 +1676,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1686,7 +1686,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1696,7 +1696,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1706,7 +1706,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1716,7 +1716,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1726,7 +1726,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1736,7 +1736,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1746,7 +1746,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1756,7 +1756,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1766,7 +1766,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1776,7 +1776,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1786,7 +1786,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1796,7 +1796,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1806,7 +1806,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1816,7 +1816,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1826,7 +1826,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1836,7 +1836,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1846,7 +1846,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1856,7 +1856,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1866,7 +1866,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1876,7 +1876,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1886,7 +1886,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1896,7 +1896,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1906,7 +1906,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1916,7 +1916,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1926,7 +1926,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1936,7 +1936,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1946,7 +1946,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1956,7 +1956,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1966,7 +1966,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1976,7 +1976,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1986,7 +1986,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1996,7 +1996,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2006,7 +2006,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2016,7 +2016,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2026,7 +2026,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2036,7 +2036,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2046,7 +2046,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2056,7 +2056,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2066,7 +2066,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2076,7 +2076,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2086,7 +2086,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2096,7 +2096,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2116,7 +2116,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2126,7 +2126,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2136,7 +2136,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2146,7 +2146,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2156,7 +2156,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2166,7 +2166,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2176,7 +2176,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2196,7 +2196,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2206,7 +2206,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2216,7 +2216,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2226,7 +2226,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2236,7 +2236,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2246,7 +2246,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2256,7 +2256,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2276,7 +2276,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2286,7 +2286,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2296,7 +2296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2306,7 +2306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2326,7 +2326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2336,7 +2336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2356,7 +2356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2366,7 +2366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2376,7 +2376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2386,7 +2386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2396,7 +2396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2416,7 +2416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2436,7 +2436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2446,7 +2446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2456,7 +2456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2466,7 +2466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2476,7 +2476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2486,7 +2486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2516,7 +2516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2526,7 +2526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2536,7 +2536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2546,7 +2546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2556,7 +2556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2566,7 +2566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660ea10a..f755b997b967 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
+#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
static void crc32init_le(void)
{
- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
new file mode 100644
index 000000000000..9011926e4162
--- /dev/null
+++ b/lib/gen_crc64table.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate lookup table for the table-driven CRC64 calculation.
+ *
+ * gen_crc64table is executed at kernel build time and generates
+ * lib/crc64table.h. This header is included by lib/crc64.c for
+ * the table-driven CRC64 calculation.
+ *
+ * See lib/crc64.c for more information about the specification
+ * and the polynomial arithmetic that gen_crc64table.c follows to
+ * generate the lookup table.
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <linux/swab.h>
+
+#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+
+static uint64_t crc64_table[256] = {0};
+
+static void generate_crc64_table(void)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0;
+ c = i << 56;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ c) & 0x8000000000000000ULL)
+ crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+ else
+ crc <<= 1;
+ c <<= 1;
+ }
+
+ crc64_table[i] = crc;
+ }
+}
+
+static void print_crc64_table(void)
+{
+ int i;
+
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+ if (i & 0x1)
+ printf(",\n");
+ else
+ printf(", ");
+ }
+ printf("};\n");
+}
+
+int main(int argc, char *argv[])
+{
+ generate_crc64_table();
+ print_crc64_table();
+ return 0;
+}
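For context, a table built this way is consumed by a byte-at-a-time, MSB-first CRC64 loop. The sketch below is illustrative only and not part of this patch: the function name, the extern declaration and the stdint types are assumptions for a standalone example, while lib/crc64.c provides the real kernel interface over the generated crc64table[].

#include <stdint.h>
#include <stddef.h>

/* The generated table; in the kernel it is the static const u64
 * crc64table[256] emitted into lib/crc64table.h by the program above. */
extern const uint64_t crc64table[256];

/* Hypothetical helper: fold a buffer into a running CRC64 (ECMA-182,
 * MSB first), mirroring the bit-at-a-time loop in generate_crc64_table(). */
static uint64_t crc64_update_sketch(uint64_t crc, const unsigned char *p,
                                    size_t len)
{
        size_t i;

        /* One byte per step: combine the top byte of the running CRC
         * with the next message byte and look up the precomputed
         * remainder. */
        for (i = 0; i < len; i++)
                crc = (crc << 8) ^ crc64table[(crc >> 56) ^ p[i]];

        return crc;
}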
diff --git a/lib/idr.c b/lib/idr.c
index ed9c169c12bd..cb1db9b8d3f6 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,8 +6,6 @@
#include <linux/spinlock.h>
#include <linux/xarray.h>
-DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
/**
* idr_alloc_u32() - Allocate an ID.
* @idr: IDR handle.
@@ -39,10 +37,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
unsigned int base = idr->idr_base;
unsigned int id = *nextid;
- if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
- return -EINVAL;
- if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
- idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
+ if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
+ idr->idr_rt.xa_flags |= IDR_RT_MARKER;
id = (id < base) ? 0 : id - base;
radix_tree_iter_init(&iter, id);
@@ -295,15 +291,13 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
void __rcu **slot = NULL;
void *entry;
- if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
- return ERR_PTR(-EINVAL);
id -= idr->idr_base;
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
- __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr);
return entry;
}
@@ -317,18 +311,15 @@ EXPORT_SYMBOL(idr_replace);
* bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
- * ida_simple_get(). To free an ID, call ida_simple_remove().
+ * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
+ * To free an ID, call ida_free().
*
- * If you have more complex locking requirements, use a loop around
- * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
- * ida_remove() to free an ID. You must make sure that ida_get_new() and
- * ida_remove() cannot be called at the same time as each other for the
- * same IDA.
+ * ida_destroy() can be used to dispose of an IDA without needing to
+ * free the individual IDs in it. You can use ida_is_empty() to find
+ * out whether the IDA has any IDs currently allocated.
*
- * You can also use ida_get_new_above() if you need an ID to be allocated
- * above a particular number. ida_destroy() can be used to dispose of an
- * IDA without needing to free the individual IDs in it. You can use
- * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ * The IDA handles its own locking. It is safe to call any of the IDA
+ * functions without synchronisation in your code.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
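To make the simplified interface described above concrete, here is a minimal usage sketch. The example_* names are hypothetical; only DEFINE_IDA(), ida_alloc() and ida_free() come from the documented API, and ida_alloc() is simply ida_alloc_range() over the full ID space.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

/* Allocate a free ID; returns a non-negative ID or a negative errno. */
static int example_get_id(void)
{
        return ida_alloc(&example_ida, GFP_KERNEL);
}

/* Return a previously allocated ID to the IDA. */
static void example_put_id(int id)
{
        ida_free(&example_ida, id);
}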
@@ -337,286 +328,265 @@ EXPORT_SYMBOL(idr_replace);
/*
* Developer's notes:
*
- * The IDA uses the functionality provided by the IDR & radix tree to store
- * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
- * free, unlike the IDR where it means at least one entry is free.
+ * The IDA uses the functionality provided by the XArray to store bitmaps in
+ * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
+ * have been set.
*
- * I considered telling the radix tree that each slot is an order-10 node
- * and storing the bit numbers in the radix tree, but the radix tree can't
- * allow a single multiorder entry at index 0, which would significantly
- * increase memory consumption for the IDA. So instead we divide the index
- * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ * I considered telling the XArray that each slot is an order-10 node
+ * and indexing by bit number, but the XArray can't allow a single multi-index
+ * entry in the head, which would significantly increase memory consumption
+ * for the IDA. So instead we divide the index by the number of bits in the
+ * leaf bitmap before doing a radix tree lookup.
*
* As an optimisation, if there are only a few low bits set in any given
- * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
- * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
- * directly in the entry. By being really tricksy, we could store
- * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
- * for 0-3 allocated IDs.
- *
- * We allow the radix tree 'exceptional' count to get out of date. Nothing
- * in the IDA nor the radix tree code checks it. If it becomes important
- * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
- * calls to radix_tree_iter_replace() which will correct the exceptional
- * count.
- *
- * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
+ * leaf, instead of allocating a 128-byte bitmap, we store the bits
+ * as a value entry. Value entries never have the XA_FREE_MARK cleared
+ * because we can always convert them into a bitmap entry.
+ *
+ * It would be possible to optimise further; once we've run out of a
+ * single 128-byte bitmap, we currently switch to a 576-byte node, put
+ * the 128-byte bitmap in the first entry and then start allocating extra
+ * 128-byte entries. We could instead use the 512 bytes of the node's
+ * data as a bitmap before moving to that scheme. I do not believe this
+ * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
+ * users of the IDA and almost none of them use more than 1024 entries.
+ * Those that do use more than the 8192 IDs that the 512 bytes would
+ * provide.
+ *
+ * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
* equivalent, it will still need locking. Going to RCU lookup would require
* using RCU to free bitmaps, and that's not trivial without embedding an
* RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
* bitmap, which is excessive.
*/
-#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
-
/**
- * ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @start: id to start search at
- * @id: pointer to the allocated handle
- *
- * Allocate new ID above or equal to @start. It should be called
- * with any required locks to ensure that concurrent calls to
- * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
- * Consider using ida_simple_get() if you do not have complex locking
- * requirements.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the ida_pre_get() call. If the ida is full, it will
- * return %-ENOSPC. On success, it will return 0.
- *
- * @id returns a value in the range @start ... %0x7fffffff.
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and @max, inclusive. The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-int ida_get_new_above(struct ida *ida, int start, int *id)
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+ gfp_t gfp)
{
- struct radix_tree_root *root = &ida->ida_rt;
- void __rcu **slot;
- struct radix_tree_iter iter;
- struct ida_bitmap *bitmap;
- unsigned long index;
- unsigned bit, ebit;
- int new;
-
- index = start / IDA_BITMAP_BITS;
- bit = start % IDA_BITMAP_BITS;
- ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
-
- slot = radix_tree_iter_init(&iter, index);
- for (;;) {
- if (slot)
- slot = radix_tree_next_slot(slot, &iter,
- RADIX_TREE_ITER_TAGGED);
- if (!slot) {
- slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
- if (IS_ERR(slot)) {
- if (slot == ERR_PTR(-ENOMEM))
- return -EAGAIN;
- return PTR_ERR(slot);
+ XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
+ unsigned bit = min % IDA_BITMAP_BITS;
+ unsigned long flags;
+ struct ida_bitmap *bitmap, *alloc = NULL;
+
+ if ((int)min < 0)
+ return -ENOSPC;
+
+ if ((int)max < 0)
+ max = INT_MAX;
+
+retry:
+ xas_lock_irqsave(&xas, flags);
+next:
+ bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
+ if (xas.xa_index > min / IDA_BITMAP_BITS)
+ bit = 0;
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+
+ if (xa_is_value(bitmap)) {
+ unsigned long tmp = xa_to_value(bitmap);
+
+ if (bit < BITS_PER_XA_VALUE) {
+ bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit < BITS_PER_XA_VALUE) {
+ tmp |= 1UL << bit;
+ xas_store(&xas, xa_mk_value(tmp));
+ goto out;
}
}
- if (iter.index > index) {
- bit = 0;
- ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
- }
- new = iter.index * IDA_BITMAP_BITS;
- bitmap = rcu_dereference_raw(*slot);
- if (radix_tree_exception(bitmap)) {
- unsigned long tmp = (unsigned long)bitmap;
- ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
- if (ebit < BITS_PER_LONG) {
- tmp |= 1UL << ebit;
- rcu_assign_pointer(*slot, (void *)tmp);
- *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
- return 0;
- }
- bitmap = this_cpu_xchg(ida_bitmap, NULL);
- if (!bitmap)
- return -EAGAIN;
- bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
- rcu_assign_pointer(*slot, bitmap);
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ if (!bitmap)
+ goto alloc;
+ bitmap->bitmap[0] = tmp;
+ xas_store(&xas, bitmap);
+ if (xas_error(&xas)) {
+ bitmap->bitmap[0] = 0;
+ goto out;
}
+ }
- if (bitmap) {
- bit = find_next_zero_bit(bitmap->bitmap,
- IDA_BITMAP_BITS, bit);
- new += bit;
- if (new < 0)
- return -ENOSPC;
- if (bit == IDA_BITMAP_BITS)
- continue;
+ if (bitmap) {
+ bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit == IDA_BITMAP_BITS)
+ goto next;
- __set_bit(bit, bitmap->bitmap);
- if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
- radix_tree_iter_tag_clear(root, &iter,
- IDR_FREE);
+ __set_bit(bit, bitmap->bitmap);
+ if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ if (bit < BITS_PER_XA_VALUE) {
+ bitmap = xa_mk_value(1UL << bit);
} else {
- new += bit;
- if (new < 0)
- return -ENOSPC;
- if (ebit < BITS_PER_LONG) {
- bitmap = (void *)((1UL << ebit) |
- RADIX_TREE_EXCEPTIONAL_ENTRY);
- radix_tree_iter_replace(root, &iter, slot,
- bitmap);
- *id = new;
- return 0;
- }
- bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
- return -EAGAIN;
+ goto alloc;
__set_bit(bit, bitmap->bitmap);
- radix_tree_iter_replace(root, &iter, slot, bitmap);
}
-
- *id = new;
- return 0;
+ xas_store(&xas, bitmap);
+ }
+out:
+ xas_unlock_irqrestore(&xas, flags);
+ if (xas_nomem(&xas, gfp)) {
+ xas.xa_index = min / IDA_BITMAP_BITS;
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
}
+ if (bitmap != alloc)
+ kfree(alloc);
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ return xas.xa_index * IDA_BITMAP_BITS + bit;
+alloc:
+ xas_unlock_irqrestore(&xas, flags);
+ alloc = kzalloc(sizeof(*bitmap), gfp);
+ if (!alloc)
+ return -ENOMEM;
+ xas_set(&xas, min / IDA_BITMAP_BITS);
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
+nospc:
+ xas_unlock_irqrestore(&xas, flags);
+ return -ENOSPC;
}
-EXPORT_SYMBOL(ida_get_new_above);
+EXPORT_SYMBOL(ida_alloc_range);
/**
- * ida_remove - Free the given ID
- * @ida: ida handle
- * @id: ID to free
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
*
- * This function should not be called at the same time as ida_get_new_above().
+ * Context: Any context.
*/
-void ida_remove(struct ida *ida, int id)
+void ida_free(struct ida *ida, unsigned int id)
{
- unsigned long index = id / IDA_BITMAP_BITS;
- unsigned offset = id % IDA_BITMAP_BITS;
+ XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
+ unsigned bit = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
- unsigned long *btmp;
- struct radix_tree_iter iter;
- void __rcu **slot;
+ unsigned long flags;
- slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
- if (!slot)
- goto err;
+ BUG_ON((int)id < 0);
- bitmap = rcu_dereference_raw(*slot);
- if (radix_tree_exception(bitmap)) {
- btmp = (unsigned long *)slot;
- offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
- if (offset >= BITS_PER_LONG)
+ xas_lock_irqsave(&xas, flags);
+ bitmap = xas_load(&xas);
+
+ if (xa_is_value(bitmap)) {
+ unsigned long v = xa_to_value(bitmap);
+ if (bit >= BITS_PER_XA_VALUE)
+ goto err;
+ if (!(v & (1UL << bit)))
goto err;
+ v &= ~(1UL << bit);
+ if (!v)
+ goto delete;
+ xas_store(&xas, xa_mk_value(v));
} else {
- btmp = bitmap->bitmap;
- }
- if (!test_bit(offset, btmp))
- goto err;
-
- __clear_bit(offset, btmp);
- radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
- if (radix_tree_exception(bitmap)) {
- if (rcu_dereference_raw(*slot) ==
- (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
- } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
- kfree(bitmap);
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ if (!test_bit(bit, bitmap->bitmap))
+ goto err;
+ __clear_bit(bit, bitmap->bitmap);
+ xas_set_mark(&xas, XA_FREE_MARK);
+ if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
+ kfree(bitmap);
+delete:
+ xas_store(&xas, NULL);
+ }
}
+ xas_unlock_irqrestore(&xas, flags);
return;
err:
- WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
+ xas_unlock_irqrestore(&xas, flags);
+ WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
-EXPORT_SYMBOL(ida_remove);
+EXPORT_SYMBOL(ida_free);
/**
- * ida_destroy - Free the contents of an ida
- * @ida: ida handle
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
+ *
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA. When this call returns, the IDA is empty and can be reused
+ * or freed. If the IDA is already empty, there is no need to call this
+ * function.
*
- * Calling this function releases all resources associated with an IDA. When
- * this call returns, the IDA is empty and can be reused or freed. The caller
- * should not allow ida_remove() or ida_get_new_above() to be called at the
- * same time.
+ * Context: Any context.
*/
void ida_destroy(struct ida *ida)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
+ XA_STATE(xas, &ida->xa, 0);
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
- radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
- struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
- if (!radix_tree_exception(bitmap))
+ xas_lock_irqsave(&xas, flags);
+ xas_for_each(&xas, bitmap, ULONG_MAX) {
+ if (!xa_is_value(bitmap))
kfree(bitmap);
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ xas_store(&xas, NULL);
}
+ xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
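
For readers following the conversion, a minimal, hedged usage sketch of the API exported above (ida_alloc_range(), ida_free(), ida_destroy()); the IDA instance, range bounds and function names are illustrative and not part of this patch:

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDA(example_ida);         /* hypothetical IDA instance */

    static int example_get_id(void)
    {
            /* Returns an ID in [1, 99], or -ENOMEM / -ENOSPC on failure. */
            return ida_alloc_range(&example_ida, 1, 99, GFP_KERNEL);
    }

    static void example_put_id(int id)
    {
            if (id >= 0)
                    ida_free(&example_ida, id);
    }

    static void example_teardown(void)
    {
            /* Frees every remaining ID and all backing memory. */
            ida_destroy(&example_ida);
    }
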
-/**
- * ida_simple_get - get a new id.
- * @ida: the (initialized) ida.
- * @start: the minimum id (inclusive, < 0x8000000)
- * @end: the maximum id (exclusive, < 0x8000000 or 0)
- * @gfp_mask: memory allocation flags
- *
- * Allocates an id in the range start <= id < end, or returns -ENOSPC.
- * On memory allocation failure, returns -ENOMEM.
- *
- * Compared to ida_get_new_above() this function does its own locking, and
- * should be used unless there are special requirements.
- *
- * Use ida_simple_remove() to get rid of an id.
- */
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask)
-{
- int ret, id;
- unsigned int max;
- unsigned long flags;
-
- BUG_ON((int)start < 0);
- BUG_ON((int)end < 0);
-
- if (end == 0)
- max = 0x80000000;
- else {
- BUG_ON(end < start);
- max = end - 1;
- }
+#ifndef __KERNEL__
+extern void xa_dump_index(unsigned long index, unsigned int shift);
+#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
-again:
- if (!ida_pre_get(ida, gfp_mask))
- return -ENOMEM;
+static void ida_dump_entry(void *entry, unsigned long index)
+{
+ unsigned long i;
+
+ if (!entry)
+ return;
+
+ if (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+ unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
+ XA_CHUNK_SHIFT;
+
+ xa_dump_index(index * IDA_BITMAP_BITS, shift);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ ida_dump_entry(node->slots[i],
+ index | (i << node->shift));
+ } else if (xa_is_value(entry)) {
+ xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
+ pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
+ } else {
+ struct ida_bitmap *bitmap = entry;
- xa_lock_irqsave(&ida->ida_rt, flags);
- ret = ida_get_new_above(ida, start, &id);
- if (!ret) {
- if (id > max) {
- ida_remove(ida, id);
- ret = -ENOSPC;
- } else {
- ret = id;
- }
+ xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
+ pr_cont("bitmap: %p data", bitmap);
+ for (i = 0; i < IDA_BITMAP_LONGS; i++)
+ pr_cont(" %lx", bitmap->bitmap[i]);
+ pr_cont("\n");
}
- xa_unlock_irqrestore(&ida->ida_rt, flags);
-
- if (unlikely(ret == -EAGAIN))
- goto again;
-
- return ret;
}
-EXPORT_SYMBOL(ida_simple_get);
-/**
- * ida_simple_remove - remove an allocated id.
- * @ida: the (initialized) ida.
- * @id: the id returned by ida_simple_get.
- *
- * Use to release an id allocated with ida_simple_get().
- *
- * Compared to ida_remove() this function does its own locking, and should be
- * used unless there are special requirements.
- */
-void ida_simple_remove(struct ida *ida, unsigned int id)
+static void ida_dump(struct ida *ida)
{
- unsigned long flags;
-
- BUG_ON((int)id < 0);
- xa_lock_irqsave(&ida->ida_rt, flags);
- ida_remove(ida, id);
- xa_unlock_irqrestore(&ida->ida_rt, flags);
+ struct xarray *xa = &ida->xa;
+ pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
+ xa->xa_flags >> ROOT_TAG_SHIFT);
+ ida_dump_entry(xa->xa_head, 0);
}
-EXPORT_SYMBOL(ida_simple_remove);
+#endif
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7e43cd54c84c..7ebccb5c1637 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -83,6 +83,7 @@
const struct kvec *kvec; \
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -114,6 +115,8 @@
} \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
+ skip += n; \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -428,17 +431,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
-void iov_iter_init(struct iov_iter *i, int direction,
+void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
+ WARN_ON(direction & ~(READ | WRITE));
+ direction &= READ | WRITE;
+
/* It will get better. Eventually... */
if (uaccess_kernel()) {
- direction |= ITER_KVEC;
- i->type = direction;
+ i->type = ITER_KVEC | direction;
i->kvec = (struct kvec *)iov;
} else {
- i->type = direction;
+ i->type = ITER_IOVEC | direction;
i->iov = iov;
}
i->nr_segs = nr_segs;
@@ -558,7 +563,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
@@ -596,15 +601,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
return ret;
}
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off, xfer = 0;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ unsigned long rem;
+
+ rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+ chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk - rem;
+ xfer += chunk - rem;
+ if (rem)
+ break;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= xfer;
+ return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and the typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ * byte-by-byte until the fault happens again. Re-triggering machine
+ * checks is potentially fatal so the implementation uses source
+ * alignment and poison alignment assumptions to avoid re-triggering
+ * hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ * a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
+ if (unlikely(iov_iter_is_pipe(i)))
+ return copy_pipe_to_iter_mcsafe(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
iterate_and_advance(i, bytes, v,
@@ -637,7 +697,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -657,7 +717,7 @@ EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -684,7 +744,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -701,10 +761,24 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that guarantees all data is flushed for
+ * all iterator types. The _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -725,7 +799,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -767,7 +841,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
- } else if (likely(!(i->type & ITER_PIPE)))
+ } else if (unlikely(iov_iter_is_discard(i)))
+ return bytes;
+ else if (likely(!iov_iter_is_pipe(i)))
return copy_page_to_iter_iovec(page, offset, bytes, i);
else
return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -779,7 +855,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -819,7 +895,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
clear_user(v.iov_base, v.iov_len),
@@ -839,7 +915,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
kunmap_atomic(kaddr);
return 0;
}
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
@@ -903,10 +979,14 @@ static void pipe_advance(struct iov_iter *i, size_t size)
void iov_iter_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
pipe_advance(i, size);
return;
}
+ if (unlikely(iov_iter_is_discard(i))) {
+ i->count -= size;
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -918,7 +998,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
int idx = i->idx;
size_t off = i->iov_offset;
@@ -942,12 +1022,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
pipe_truncate(i);
return;
}
+ if (unlikely(iov_iter_is_discard(i)))
+ return;
if (unroll <= i->iov_offset) {
i->iov_offset -= unroll;
return;
}
unroll -= i->iov_offset;
- if (i->type & ITER_BVEC) {
+ if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -980,23 +1062,25 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
- else if (i->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(i)))
+ return i->count;
+ else if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
-void iov_iter_kvec(struct iov_iter *i, int direction,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_KVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_KVEC | (direction & (READ | WRITE));
i->kvec = kvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -1004,12 +1088,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_kvec);
-void iov_iter_bvec(struct iov_iter *i, int direction,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_BVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_BVEC | (direction & (READ | WRITE));
i->bvec = bvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -1017,13 +1101,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_bvec);
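
With the constructors above now taking a plain READ/WRITE direction instead of an ITER_* flag, a caller looks roughly like the sketch below; the buffer and length are made up, and the usual convention is assumed that a READ iterator describes the destination of the data (filled via copy_to_iter()):

    #include <linux/uio.h>

    static size_t example_fill_kvec(void *buf, size_t len, const void *src)
    {
            struct kvec kv = { .iov_base = buf, .iov_len = len };
            struct iov_iter iter;

            /* Old callers passed ITER_KVEC here; now only READ or WRITE. */
            iov_iter_kvec(&iter, READ, &kv, 1, len);

            return copy_to_iter(src, len, &iter);   /* fills buf */
    }
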
-void iov_iter_pipe(struct iov_iter *i, int direction,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
struct pipe_inode_info *pipe,
size_t count)
{
- BUG_ON(direction != ITER_PIPE);
+ BUG_ON(direction != READ);
WARN_ON(pipe->nrbufs == pipe->buffers);
- i->type = direction;
+ i->type = ITER_PIPE | READ;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
@@ -1032,12 +1116,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_pipe);
+/**
+ * iov_iter_discard - Initialise an I/O iterator that discards data
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator that just discards everything that's written to it.
+ * It's only available as a READ iterator.
+ */
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
+{
+ BUG_ON(direction != READ);
+ i->type = ITER_DISCARD | READ;
+ i->count = count;
+ i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_discard);
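
A short sketch of the new discard iterator in use; the function name and byte count are invented for illustration:

    #include <linux/uio.h>

    /* Consume and throw away @count bytes that a producer hands us. */
    static size_t example_sink_bytes(const void *src, size_t count)
    {
            struct iov_iter iter;

            iov_iter_discard(&iter, READ, count);

            /* The copy "succeeds" but the data is simply dropped. */
            return copy_to_iter(src, count, &iter);
    }
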
+
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
return size | i->iov_offset;
return size;
@@ -1056,7 +1158,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return ~0U;
}
@@ -1124,8 +1226,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1136,7 +1241,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
len = maxpages * PAGE_SIZE;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1201,8 +1306,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1214,7 +1322,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
p = get_pages_array(n);
if (!p)
return -ENOMEM;
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
@@ -1244,7 +1352,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -1286,7 +1394,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return false;
}
@@ -1331,7 +1439,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
@@ -1373,8 +1481,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
if (!size)
return 0;
+ if (unlikely(iov_iter_is_discard(i)))
+ return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
@@ -1412,11 +1522,13 @@ EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
- if (unlikely(new->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(new))) {
WARN_ON(1);
return NULL;
}
- if (new->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(new)))
+ return NULL;
+ if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/kobject.c b/lib/kobject.c
index 18989b5b3b56..97d86dc17c42 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj)
return kobj->ktype->namespace(kobj);
}
+/**
+ * kobject_get_ownership - get sysfs ownership data for @kobj
+ * @kobj: kobject in question
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns initial uid/gid pair that should be used when creating sysfs
+ * representation of given kobject. Normally used to adjust ownership of
+ * objects in a container.
+ */
+void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+
+ if (kobj->ktype->get_ownership)
+ kobj->ktype->get_ownership(kobj, uid, gid);
+}
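
A hedged sketch of the callback this helper dispatches to: a hypothetical kobj_type that reports a per-object owner for its sysfs files. The embedding structure and uid/gid fields are invented, and a real ktype would also provide .release:

    #include <linux/kobject.h>
    #include <linux/uidgid.h>

    struct example_obj {
            struct kobject kobj;
            kuid_t owner_uid;       /* hypothetical per-object owner */
            kgid_t owner_gid;
    };

    static void example_get_ownership(struct kobject *kobj,
                                      kuid_t *uid, kgid_t *gid)
    {
            struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

            /* kobject_get_ownership() pre-set GLOBAL_ROOT_*; override it here. */
            *uid = obj->owner_uid;
            *gid = obj->owner_gid;
    }

    static struct kobj_type example_ktype = {
            .sysfs_ops      = &kobj_sysfs_ops,
            .get_ownership  = example_get_ownership,
    };
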
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -125,7 +144,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
- strncpy(path + length, kobject_name(parent), cur);
+ memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
@@ -868,9 +887,16 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
+void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ if (kobj->parent)
+ kobject_get_ownership(kobj->parent, uid, gid);
+}
+
static struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = kset_release,
+ .release = kset_release,
+ .get_ownership = kset_get_ownership,
};
/**
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned long)tmp)
+ if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(long)tmp)
+ if (tmp != (long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned int)tmp)
+ if (tmp != (unsigned int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(int)tmp)
+ if (tmp != (int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u16)tmp)
+ if (tmp != (u16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s16)tmp)
+ if (tmp != (s16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u8)tmp)
+ if (tmp != (u8)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s8)tmp)
+ if (tmp != (s8)tmp)
return -ERANGE;
*res = tmp;
return 0;
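
The simplified checks above keep the same user-visible behaviour; a small usage sketch (assuming the buffer comes from something like a sysfs store handler):

    #include <linux/kernel.h>

    /* Parse a byte value; anything outside 0..255 still yields -ERANGE. */
    static int example_parse_u8(const char *buf, u8 *out)
    {
            /* e.g. "300" fails the (tmp != (u8)tmp) check and returns -ERANGE */
            return kstrtou8(buf, 0, out);
    }
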
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index b5c1293ce147..1e1bbf171eca 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -29,7 +29,7 @@
*/
static unsigned int debug_locks_verbose;
-static DEFINE_WW_CLASS(ww_lockdep);
+static DEFINE_WD_CLASS(ww_lockdep);
static int __init setup_debug_locks_verbose(char *str)
{
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
/*-*****************************
* Decompression functions
*******************************/
-/* LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
+
+#define DEBUGLOG(l, ...) {} /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
+/*
+ * LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
static FORCE_INLINE int LZ4_decompress_generic(
- const char * const source,
- char * const dest,
- int inputSize,
+ const char * const src,
+ char * const dst,
+ int srcSize,
/*
* If endOnInput == endOnInputSize,
- * this value is the max size of Output Buffer.
+ * this value is `dstCapacity`
*/
int outputSize,
/* endOnOutputSize, endOnInputSize */
- int endOnInput,
+ endCondition_directive endOnInput,
/* full, partial */
- int partialDecoding,
- /* only used if partialDecoding == partial */
- int targetOutputSize,
+ earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k, usingExtDict */
- int dict,
- /* == dest when no prefix */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
/* only if dict == usingExtDict */
const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
const size_t dictSize
)
{
- /* Local Variables */
- const BYTE *ip = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE * const iend = ip + srcSize;
- BYTE *op = (BYTE *) dest;
+ BYTE *op = (BYTE *) dst;
BYTE * const oend = op + outputSize;
BYTE *cpy;
- BYTE *oexit = op + targetOutputSize;
- const BYTE * const lowLimit = lowPrefix - dictSize;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
- static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+ static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend = iend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend = oend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
+ srcSize, outputSize);
+
/* Special cases */
- /* targetOutputSize too high => decode everything */
- if ((partialDecoding) && (oexit > oend - MFLIMIT))
- oexit = oend - MFLIMIT;
+ assert(lowPrefix <= op);
+ assert(src != NULL);
/* Empty output buffer */
if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
if ((!endOnInput) && (unlikely(outputSize == 0)))
return (*ip == 0 ? 1 : -1);
+ if ((endOnInput) && unlikely(srcSize == 0))
+ return -1;
+
/* Main Loop : decode sequences */
while (1) {
size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* get literal length */
unsigned int const token = *ip++;
-
length = token>>ML_BITS;
+ /* ip < iend before the increment */
+ assert(!endOnInput || ip <= iend);
+
+ /*
+ * A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough
+ * space, enter the shortcut and copy 16 bytes on behalf
+ * of the literals (in the fast mode, only 8 bytes can be
+ * safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes
+ * in a similar manner; but we ensure that there's enough
+ * space in the output for those 18 bytes earlier, upon
+ * entering the shortcut (in other words, there is a
+ * combined check for both stages).
+ */
+ if ((endOnInput ? length != RUN_MASK : length <= 8)
+ /*
+ * strictly "less than" on input, to re-enter
+ * the loop with at least one byte
+ */
+ && likely((endOnInput ? ip < shortiend : 1) &
+ (op <= shortoend))) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /*
+ * The second stage:
+ * prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted.
+ */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) &&
+ (offset >= 8) &&
+ (dict == withPrefix64k || match >= lowPrefix)) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /*
+ * The second stage didn't work out, but the info
+ * is ready. Propel it right to the point of match
+ * copying.
+ */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
if (length == RUN_MASK) {
unsigned int s;
+ if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
+ /* overflow detection */
+ goto _output_error;
+ }
do {
s = *ip++;
length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
: 1) & (s == 255));
if ((safeDecode)
- && unlikely(
- (size_t)(op + length) < (size_t)(op))) {
+ && unlikely((uptrval)(op) +
+ length < (uptrval)(op))) {
/* overflow detection */
goto _output_error;
}
if ((safeDecode)
- && unlikely(
- (size_t)(ip + length) < (size_t)(ip))) {
+ && unlikely((uptrval)(ip) +
+ length < (uptrval)(ip))) {
/* overflow detection */
goto _output_error;
}
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* copy literals */
cpy = op + length;
- if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+
+ if (((endOnInput) && ((cpy > oend - MFLIMIT)
|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
if (partialDecoding) {
if (cpy > oend) {
/*
- * Error :
- * write attempt beyond end of output buffer
+ * Partial decoding :
+ * stop in the middle of literal segment
*/
- goto _output_error;
+ cpy = oend;
+ length = oend - op;
}
if ((endOnInput)
&& (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, ip, length);
ip += length;
op += length;
+
/* Necessarily EOF, due to parsing restrictions */
- break;
+ if (!partialDecoding || (cpy == oend))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
}
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
-
/* get offset */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
- if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* get matchlength */
+ length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
/* Error : offset outside buffers */
goto _output_error;
}
/* costs ~1%; silence an msan warning when offset == 0 */
- LZ4_write32(op, (U32)offset);
+ /*
+ * note : when partialDecoding, there is no guarantee that
+ * at least 4 bytes remain available in output buffer
+ */
+ if (!partialDecoding) {
+ assert(oend > op);
+ assert(oend - op >= 4);
+
+ LZ4_write32(op, (U32)offset);
+ }
- /* get matchlength */
- length = token & ML_MASK;
if (length == ML_MASK) {
unsigned int s;
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
if ((safeDecode)
&& unlikely(
- (size_t)(op + length) < (size_t)op)) {
+ (uptrval)(op) + length < (uptrval)op)) {
/* overflow detection */
goto _output_error;
}
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
length += MINMATCH;
- /* check external dictionary */
+ /* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
- goto _output_error;
+ if (!partialDecoding)
+ goto _output_error;
+ length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
- * match can be copied as a single segment
- * from external dictionary
+ * match fits entirely within external
+ * dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
- * match encompass external
+ * match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
-
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += restSize;
}
}
-
continue;
}
/* copy match within block */
cpy = op + length;
- if (unlikely(offset < 8)) {
- const int dec64 = dec64table[offset];
+ /*
+ * partialDecoding :
+ * may not respect endBlock parsing restrictions
+ */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = min(length, (size_t)(oend - op));
+ const BYTE * const matchEnd = match + mlen;
+ BYTE * const copyEnd = op + mlen;
+
+ if (matchEnd > op) {
+ /* overlap copy */
+ while (op < copyEnd)
+ *op++ = *match++;
+ } else {
+ memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend)
+ break;
+ continue;
+ }
+ if (unlikely(offset < 8)) {
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
- match += dec32table[offset];
+ match += inc32table[offset];
memcpy(op + 4, match, 4);
- match -= dec64;
+ match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += 8;
- if (unlikely(cpy > oend - 12)) {
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
match += oCopyLimit - op;
op = oCopyLimit;
}
-
while (op < cpy)
*op++ = *match++;
} else {
LZ4_copy8(op, match);
-
if (length > 16)
LZ4_wildCopy(op + 8, match + 8, cpy);
}
-
- op = cpy; /* correction */
+ op = cpy; /* wildcopy correction */
}
/* end of decoding */
if (endOnInput) {
/* Nb of output bytes decoded */
- return (int) (((char *)op) - dest);
+ return (int) (((char *)op) - dst);
} else {
/* Nb of input bytes read */
- return (int) (((const char *)ip) - source);
+ return (int) (((const char *)ip) - src);
}
/* Overflow error detected */
_output_error:
- return -1;
+ return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
}
-int LZ4_decompress_safe_partial(const char *source, char *dest,
- int compressedSize, int targetOutputSize, int maxDecompressedSize)
+int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int compressedSize, int targetOutputSize, int dstCapacity)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, partial,
- targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+ dstCapacity = min(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE *)dst, NULL, 0);
}
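
Since partial decoding now stops cleanly when the output buffer fills instead of returning an error, a caller can peek at just the start of a compressed blob; the names and lengths below are illustrative only:

    #include <linux/lz4.h>

    /*
     * Recover only the first @header_len bytes of a compressed blob.
     * Returns the number of bytes produced, or a negative value on error.
     */
    static int example_peek_header(const char *comp, int comp_len,
                                   char *hdr, int header_len)
    {
            /* targetOutputSize and dstCapacity are both header_len here. */
            return LZ4_decompress_safe_partial(comp, hdr, comp_len,
                                               header_len, header_len);
    }
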
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0, withPrefix64k,
- (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+ endOnOutputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+/* ===== Instantiate a few more decoding cases, used more than once. ===== */
+
+int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
}
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*
+ * The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+static FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
+ int originalSize, size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* ===== streaming decompression functions ===== */
+
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize)
{
- LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize, int maxOutputSize)
{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest,
- compressedSize,
- maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict,
- lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 * KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source,
+ dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
+ lz4sd->prefixEnd += result;
} else {
+ /*
+ * The buffer wraps around, or they're
+ * switching to another buffer.
+ */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest,
+ result = LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE *)dest,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict,
- lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ if (lz4sd->prefixSize >= 64 * KB - 1 ||
+ lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest,
+ originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest,
+ originalSize, lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
+ lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE *)dest,
- lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest,
+ originalSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
-
return result;
}
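
A hedged sketch of the streaming pattern the rewritten *_continue() paths serve: decoding consecutive blocks of one LZ4 stream into an output buffer, where previously decoded data acts as the dictionary. The block layout and buffer handling are assumptions for illustration:

    #include <linux/lz4.h>
    #include <linux/string.h>

    static int example_stream_decode(const char *blocks[], const int block_len[],
                                     int nr_blocks, char *out, int out_cap)
    {
            LZ4_streamDecode_t sd;
            int i, off = 0;

            memset(&sd, 0, sizeof(sd));
            LZ4_setStreamDecode(&sd, NULL, 0);      /* no preset dictionary */

            for (i = 0; i < nr_blocks; i++) {
                    int n = LZ4_decompress_safe_continue(&sd, blocks[i],
                                                         out + off, block_len[i],
                                                         out_cap - off);
                    if (n < 0)
                            return n;
                    off += n;       /* decoded output doubles as the dictionary */
            }
            return off;
    }
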
-/*
- * Advanced decoding functions :
- * *_usingDict() :
- * These decoding functions work the same as "_continue" ones,
- * the dictionary must be explicitly provided within parameters
- */
-static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
- char *dest, int compressedSize, int maxOutputSize, int safe,
- const char *dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
{
if (dictSize == 0)
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
- if (dictStart + dictSize == dest) {
- if (dictSize >= (int)(64 * KB - 1))
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, noDict,
- (BYTE *)dest - dictSize, NULL, 0);
+ return LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 * KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, usingExtDict,
- (BYTE *)dest, (const BYTE *)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const char *dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest,
- compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ return LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize, dictStart, dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize, const char *dictStart, int dictSize)
+ int originalSize,
+ const char *dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0,
- originalSize, 0, dictStart, dictSize);
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, dictSize);
}
#ifndef STATIC
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+/*
+ * ensure it's possible to write 2 x wildcopyLength
+ * without overflowing output buffer
+ */
+#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
/* Increase this value ==> compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
#endif
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000000..b810fbc66962
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+/*
+ * Merge two NULL-terminated pointer arrays into a newly allocated
+ * array, which is also NULL-terminated. Nomenclature is inspired by
+ * memset_p() and memcat() found elsewhere in the kernel source tree.
+ */
+void **__memcat_p(void **a, void **b)
+{
+ void **p = a, **new;
+ int nr;
+
+ /* count the elements in both arrays */
+ for (nr = 0, p = a; *p; nr++, p++)
+ ;
+ for (p = b; *p; nr++, p++)
+ ;
+ /* one for the NULL-terminator */
+ nr++;
+
+ new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ /* nr -> last index; p points to NULL in b[] */
+ for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
+ new[nr] = *p;
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(__memcat_p);
+
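
A sketch of what the helper above is for: concatenating two NULL-terminated pointer arrays into one freshly allocated, NULL-terminated array. The element values are invented, and the declaration is assumed to live in linux/string.h alongside the other mem* helpers:

    #include <linux/slab.h>
    #include <linux/string.h>

    static void *early[] = { "alpha", "beta", NULL };
    static void *late[]  = { "gamma", NULL };

    static void example_merge(void)
    {
            /* New array: "alpha", "beta", "gamma", NULL; strings not copied. */
            void **all = __memcat_p(early, late);

            if (!all)
                    return;
            /* ... use all[] ... */
            kfree(all);             /* only the array itself is owned here */
    }
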
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..a5c921e6d667 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
- int esign, msign, bsign, rsign;
+ int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
- esign = exp->sign;
msign = mod->sign;
rp = res->d;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dfa55c873c13..d26de6156b97 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
};
static int validate_nla_bitfield32(const struct nlattr *nla,
- u32 *valid_flags_allowed)
+ const u32 *valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
- u32 *valid_flags_mask = valid_flags_allowed;
- if (!valid_flags_allowed)
+ if (!valid_flags_mask)
return -EINVAL;
/*disallow invalid bit selector */
@@ -68,11 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
return 0;
}
+static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ const struct nlattr *entry;
+ int rem;
+
+ nla_for_each_attr(entry, head, len, rem) {
+ int ret;
+
+ if (nla_len(entry) == 0)
+ continue;
+
+ if (nla_len(entry) < NLA_HDRLEN) {
+ NL_SET_ERR_MSG_ATTR(extack, entry,
+ "Array element too short");
+ return -ERANGE;
+ }
+
+ ret = nla_validate(nla_data(entry), nla_len(entry),
+ maxtype, policy, extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ bool validate_min, validate_max;
+ s64 value;
+
+ validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MIN;
+ validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MAX;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_S8:
+ value = nla_get_s8(nla);
+ break;
+ case NLA_S16:
+ value = nla_get_s16(nla);
+ break;
+ case NLA_S32:
+ value = nla_get_s32(nla);
+ break;
+ case NLA_S64:
+ value = nla_get_s64(nla);
+ break;
+ case NLA_U64:
+ /* treat this one specially, since it may not fit into s64 */
+ if ((validate_min && nla_get_u64(nla) < pt->min) ||
+ (validate_max && nla_get_u64(nla) > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if ((validate_min && value < pt->min) ||
+ (validate_max && value > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
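
A sketch of the policy side that feeds the range check above, using only the struct nla_policy fields visible in this diff (.type, .validation_type, .min, .max, .len); the attribute enum and bounds are invented:

    #include <net/netlink.h>

    enum {
            EX_ATTR_UNSPEC,
            EX_ATTR_PORT,           /* u16, must be 1..1023 */
            EX_ATTR_NAME,           /* string, at most 15 characters */
            __EX_ATTR_MAX,
    };
    #define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

    static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
            [EX_ATTR_PORT] = {
                    .type                   = NLA_U16,
                    .validation_type        = NLA_VALIDATE_RANGE,
                    .min                    = 1,
                    .max                    = 1023,
            },
            [EX_ATTR_NAME] = { .type = NLA_STRING, .len = 15 },
    };
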
static int validate_nla(const struct nlattr *nla, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+ int err = -ERANGE;
if (type <= 0 || type > maxtype)
return 0;
@@ -81,22 +168,40 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+ if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
+ (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
}
switch (pt->type) {
+ case NLA_EXACT_LEN:
+ if (attrlen != pt->len)
+ goto out_err;
+ break;
+
+ case NLA_REJECT:
+ if (extack && pt->validation_data) {
+ NL_SET_BAD_ATTR(extack, nla);
+ extack->_msg = pt->validation_data;
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ goto out_err;
+
case NLA_FLAG:
if (attrlen > 0)
- return -ERANGE;
+ goto out_err;
break;
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
- return -ERANGE;
+ goto out_err;
- return validate_nla_bitfield32(nla, pt->validation_data);
+ err = validate_nla_bitfield32(nla, pt->validation_data);
+ if (err)
+ goto out_err;
+ break;
case NLA_NUL_STRING:
if (pt->len)
@@ -104,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
else
minlen = attrlen;
- if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
- return -EINVAL;
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
+ err = -EINVAL;
+ goto out_err;
+ }
/* fall through */
case NLA_STRING:
if (attrlen < 1)
- return -ERANGE;
+ goto out_err;
if (pt->len) {
char *buf = nla_data(nla);
@@ -119,32 +226,58 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
attrlen--;
if (attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
break;
- case NLA_NESTED_COMPAT:
- if (attrlen < pt->len)
- return -ERANGE;
- if (attrlen < NLA_ALIGN(pt->len))
- break;
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
- return -ERANGE;
- nla = nla_data(nla) + NLA_ALIGN(pt->len);
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
- return -ERANGE;
- break;
case NLA_NESTED:
 /* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
+ pt->validation_data, extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
+ case NLA_NESTED_ARRAY:
+ /* a nested array attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ int err;
+
+ err = nla_validate_array(nla_data(nla), nla_len(nla),
+ pt->len, pt->validation_data,
+ extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
default:
if (pt->len)
minlen = pt->len;
@@ -152,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
- return -ERANGE;
+ goto out_err;
+ }
+
+ /* further validation */
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_NONE:
+ /* nothing to do */
+ break;
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_MIN:
+ case NLA_VALIDATE_MAX:
+ err = nla_validate_int_range(pt, nla, extack);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_FUNCTION:
+ if (pt->validate) {
+ err = pt->validate(nla, extack);
+ if (err)
+ return err;
+ }
+ break;
}
return 0;
+out_err:
+ NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ return err;
}
/**
@@ -180,13 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
int rem;
nla_for_each_attr(nla, head, len, rem) {
- int err = validate_nla(nla, maxtype, policy);
+ int err = validate_nla(nla, maxtype, policy, extack);
- if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
+ if (err < 0)
return err;
- }
}
return 0;
@@ -237,42 +391,63 @@ EXPORT_SYMBOL(nla_policy_len);
*
* Returns 0 on success or a negative error code.
*/
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static int __nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ bool strict, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
- int rem, err;
+ int rem;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
- if (type > 0 && type <= maxtype) {
- if (policy) {
- err = validate_nla(nla, maxtype, policy);
- if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
- goto errout;
- }
+ if (type == 0 || type > maxtype) {
+ if (strict) {
+ NL_SET_ERR_MSG(extack, "Unknown attribute type");
+ return -EINVAL;
}
+ continue;
+ }
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy, extack);
- tb[type] = (struct nlattr *)nla;
+ if (err < 0)
+ return err;
}
+
+ tb[type] = (struct nlattr *)nla;
}
- if (unlikely(rem > 0))
+ if (unlikely(rem > 0)) {
pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
rem, current->comm);
+ NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+ if (strict)
+ return -EINVAL;
+ }
- err = 0;
-errout:
- return err;
+ return 0;
+}
+
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, false, policy, extack);
}
EXPORT_SYMBOL(nla_parse);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, true, policy, extack);
+}
+EXPORT_SYMBOL(nla_parse_strict);
+
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
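For orientation, here is a minimal sketch of how a caller could combine the new NLA_EXACT_LEN and NLA_REJECT policy types with nla_parse_strict(); the MYDRV_ATTR_* attribute set and the error-message string are illustrative assumptions, not part of this patch.

#include <net/netlink.h>

/* Hypothetical attribute set; purely for illustration. */
enum {
        MYDRV_ATTR_UNSPEC,
        MYDRV_ATTR_COOKIE,              /* must be exactly 8 bytes */
        MYDRV_ATTR_OBSOLETE,            /* always rejected, with a message */
        __MYDRV_ATTR_MAX,
};
#define MYDRV_ATTR_MAX (__MYDRV_ATTR_MAX - 1)

static const struct nla_policy mydrv_policy[MYDRV_ATTR_MAX + 1] = {
        [MYDRV_ATTR_COOKIE]   = { .type = NLA_EXACT_LEN, .len = 8 },
        [MYDRV_ATTR_OBSOLETE] = { .type = NLA_REJECT,
                                  .validation_data = "this attribute is obsolete" },
};

static int mydrv_parse(const struct nlattr *head, int len,
                       struct netlink_ext_ack *extack)
{
        struct nlattr *tb[MYDRV_ATTR_MAX + 1];

        /*
         * Strict parsing: unknown attribute types and trailing bytes now
         * fail with -EINVAL instead of being silently skipped.
         */
        return nla_parse_strict(tb, MYDRV_ATTR_MAX, head, len,
                                mydrv_policy, extack);
}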
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 61a6b5aab07e..15ca78e1c7d4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- arch_spin_lock(&lock);
if (regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
char *buf;
int ret;
long val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = 0;
val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
char *buf;
int ret;
u64 val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = kstrtoull(buf, base, &val);
if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
*/
char *match_strdup(const substring_t *s)
{
- size_t sz = s->to - s->from + 1;
- char *p = kmalloc(sz, GFP_KERNEL);
- if (p)
- match_strlcpy(p, s, sz);
- return p;
+ return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
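As a quick illustration of the unchanged calling convention (the option string and demo function below are hypothetical, not from this patch): match_strdup() still hands back a kmalloc'd, NUL-terminated copy that the caller must kfree(); it is simply implemented via kmemdup_nul() now.

#include <linux/parser.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Illustrative only; a real substring_t would come from match_token(). */
static void parser_strdup_demo(void)
{
        static char opt[] = "size=42";                          /* hypothetical option */
        substring_t s = { .from = opt + 5, .to = opt + 7 };     /* the "42" part */
        char *val = match_strdup(&s);   /* kmemdup_nul(s.from, 2, GFP_KERNEL) */

        if (!val)
                return;
        pr_debug("parsed value: %s\n", val);
        kfree(val);                     /* ownership stays with the caller */
}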
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..de10b8c0bff6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -356,11 +356,35 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: percpu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+ WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
@@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
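A minimal sketch of the intended kill/resurrect pairing (the mydev_queue structure and the freeze/unfreeze callers are hypothetical): percpu_ref_resurrect() is only legal while the ref has been killed but its release() has not yet run.

#include <linux/percpu-refcount.h>

/* Hypothetical object embedding a percpu_ref; not part of this patch. */
struct mydev_queue {
        struct percpu_ref ref;
        /* ... */
};

static void mydev_freeze(struct mydev_queue *q)
{
        /* Switch to atomic mode and mark the ref dead. */
        percpu_ref_kill(&q->ref);
}

static void mydev_unfreeze(struct mydev_queue *q)
{
        /*
         * Allowed because the ref is dead but has not been released:
         * this takes a reference and returns the ref to its pre-kill state.
         */
        percpu_ref_resurrect(&q->ref);
}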
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
*/
#include <linux/percpu_counter.h>
-#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index beb14839b41a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag = -ENOSPC;
-
- tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock_irqsave(&tags->lock, flags);
-
- /* Fastpath */
- if (likely(tags->nr_free)) {
- tag = tags->freelist[--tags->nr_free];
- spin_unlock_irqrestore(&tags->lock, flags);
- return tag;
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- while (1) {
- spin_lock_irqsave(&pool->lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock_irqrestore(&pool->lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- tags = raw_cpu_ptr(pool->tag_cpu);
-
- spin_lock_irqsave(&tags->lock, flags);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock_irqsave(&pool->lock, flags);
- spin_lock(&tags->lock);
-
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&tags->lock);
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock_irqsave(&remote->lock, flags);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&remote->lock, flags);
- if (err)
- goto out;
- }
-
- spin_lock_irqsave(&pool->lock, flags);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index a9e41aed6de4..1106bb6aa01e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,15 +38,13 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/xarray.h>
-/* Number of nodes in fully populated tree of given height */
-static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-
/*
* Radix tree node cache.
*/
-static struct kmem_cache *radix_tree_node_cachep;
+struct kmem_cache *radix_tree_node_cachep;
/*
* The radix tree is variable-height, so an insert operation not only has
@@ -98,29 +96,12 @@ static inline void *node_to_entry(void *ptr)
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
-#define RADIX_TREE_RETRY node_to_entry(NULL)
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/* Sibling slots point directly to another slot in the same node */
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
- void __rcu **ptr = node;
- return (parent->slots <= ptr) &&
- (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
-}
-#else
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
- return false;
-}
-#endif
+#define RADIX_TREE_RETRY XA_RETRY_ENTRY
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
- return slot - parent->slots;
+ return parent ? slot - parent->slots : 0;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
@@ -129,24 +110,13 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- if (radix_tree_is_internal_node(entry)) {
- if (is_sibling_entry(parent, entry)) {
- void __rcu **sibentry;
- sibentry = (void __rcu **) entry_to_node(entry);
- offset = get_slot_offset(parent, sibentry);
- entry = rcu_dereference_raw(*sibentry);
- }
- }
-#endif
-
*nodep = (void *)entry;
return offset;
}
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
- return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
+ return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -169,32 +139,32 @@ static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
+ root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
+ root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
- root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
+ root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
- return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
+ return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
- return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
+ return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
- return !!(root->gfp_mask & ROOT_IS_IDR);
+ return !!(root->xa_flags & ROOT_IS_IDR);
}
/*
@@ -254,7 +224,7 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
- return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
+ return iter->index & RADIX_TREE_MAP_MASK;
}
/*
@@ -277,99 +247,6 @@ static unsigned long next_index(unsigned long index,
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
-#ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
- unsigned long i;
-
- pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
- node, node->offset, index, index | node_maxindex(node),
- node->parent,
- node->tags[0][0], node->tags[1][0], node->tags[2][0],
- node->shift, node->count, node->exceptional);
-
- for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
- unsigned long first = index | (i << node->shift);
- unsigned long last = first | ((1UL << node->shift) - 1);
- void *entry = node->slots[i];
- if (!entry)
- continue;
- if (entry == RADIX_TREE_RETRY) {
- pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
- i, first, last, node);
- } else if (!radix_tree_is_internal_node(entry)) {
- pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
- entry, i, first, last, node);
- } else if (is_sibling_entry(node, entry)) {
- pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
- entry, i, first, last, node,
- *(void **)entry_to_node(entry));
- } else {
- dump_node(entry_to_node(entry), first);
- }
- }
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
- pr_debug("radix root: %p rnode %p tags %x\n",
- root, root->rnode,
- root->gfp_mask >> ROOT_TAG_SHIFT);
- if (!radix_tree_is_internal_node(root->rnode))
- return;
- dump_node(entry_to_node(root->rnode), 0);
-}
-
-static void dump_ida_node(void *entry, unsigned long index)
-{
- unsigned long i;
-
- if (!entry)
- return;
-
- if (radix_tree_is_internal_node(entry)) {
- struct radix_tree_node *node = entry_to_node(entry);
-
- pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
- node, node->offset, index * IDA_BITMAP_BITS,
- ((index | node_maxindex(node)) + 1) *
- IDA_BITMAP_BITS - 1,
- node->parent, node->tags[0][0], node->shift,
- node->count);
- for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
- dump_ida_node(node->slots[i],
- index | (i << node->shift));
- } else if (radix_tree_exceptional_entry(entry)) {
- pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
- entry, (int)(index & RADIX_TREE_MAP_MASK),
- index * IDA_BITMAP_BITS,
- index * IDA_BITMAP_BITS + BITS_PER_LONG -
- RADIX_TREE_EXCEPTIONAL_SHIFT,
- (unsigned long)entry >>
- RADIX_TREE_EXCEPTIONAL_SHIFT);
- } else {
- struct ida_bitmap *bitmap = entry;
-
- pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
- (int)(index & RADIX_TREE_MAP_MASK),
- index * IDA_BITMAP_BITS,
- (index + 1) * IDA_BITMAP_BITS - 1);
- for (i = 0; i < IDA_BITMAP_LONGS; i++)
- pr_cont(" %lx", bitmap->bitmap[i]);
- pr_cont("\n");
- }
-}
-
-static void ida_dump(struct ida *ida)
-{
- struct radix_tree_root *root = &ida->ida_rt;
- pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
- root->gfp_mask >> ROOT_TAG_SHIFT);
- dump_ida_node(root->rnode, 0);
-}
-#endif
-
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -378,7 +255,7 @@ static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
- unsigned int count, unsigned int exceptional)
+ unsigned int count, unsigned int nr_values)
{
struct radix_tree_node *ret = NULL;
@@ -425,14 +302,14 @@ out:
ret->shift = shift;
ret->offset = offset;
ret->count = count;
- ret->exceptional = exceptional;
+ ret->nr_values = nr_values;
ret->parent = parent;
- ret->root = root;
+ ret->array = root;
}
return ret;
}
-static void radix_tree_node_rcu_free(struct rcu_head *head)
+void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
@@ -530,77 +407,10 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/*
- * Preload with enough objects to ensure that we can split a single entry
- * of order @old_order into many entries of size @new_order
- */
-int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
- gfp_t gfp_mask)
-{
- unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
- unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
- (new_order / RADIX_TREE_MAP_SHIFT);
- unsigned nr = 0;
-
- WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
- BUG_ON(new_order >= old_order);
-
- while (layers--)
- nr = nr * RADIX_TREE_MAP_SIZE + 1;
- return __radix_tree_preload(gfp_mask, top * nr);
-}
-#endif
-
-/*
- * The same as function above, but preload number of nodes required to insert
- * (1 << order) continuous naturally-aligned elements.
- */
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
-{
- unsigned long nr_subtrees;
- int nr_nodes, subtree_height;
-
- /* Preloading doesn't help anything with this gfp mask, skip it */
- if (!gfpflags_allow_blocking(gfp_mask)) {
- preempt_disable();
- return 0;
- }
-
- /*
- * Calculate number and height of fully populated subtrees it takes to
- * store (1 << order) elements.
- */
- nr_subtrees = 1 << order;
- for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
- subtree_height++)
- nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
-
- /*
- * The worst case is zero height tree with a single item at index 0 and
- * then inserting items starting at ULONG_MAX - (1 << order).
- *
- * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
- * 0-index item.
- */
- nr_nodes = RADIX_TREE_MAX_PATH;
-
- /* Plus branch to fully populated subtrees. */
- nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
-
- /* Root node is shared. */
- nr_nodes--;
-
- /* Plus nodes required to build subtrees. */
- nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
-
- return __radix_tree_preload(gfp_mask, nr_nodes);
-}
-
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
- struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
*nodep = node;
@@ -629,7 +439,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT;
- entry = rcu_dereference_raw(root->rnode);
+ entry = rcu_dereference_raw(root->xa_head);
if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
@@ -656,9 +466,9 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
BUG_ON(shift > BITS_PER_LONG);
if (radix_tree_is_internal_node(entry)) {
entry_to_node(entry)->parent = node;
- } else if (radix_tree_exceptional_entry(entry)) {
- /* Moving an exceptional root->rnode to a node */
- node->exceptional = 1;
+ } else if (xa_is_value(entry)) {
+ /* Moving a value entry root->xa_head to a node */
+ node->nr_values = 1;
}
/*
* entry was already in the radix tree, so we do not need
@@ -666,7 +476,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
*/
node->slots[0] = (void __rcu *)entry;
entry = node_to_entry(node);
- rcu_assign_pointer(root->rnode, entry);
+ rcu_assign_pointer(root->xa_head, entry);
shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift);
out:
@@ -677,13 +487,12 @@ out:
* radix_tree_shrink - shrink radix tree to minimum height
* @root radix tree root
*/
-static inline bool radix_tree_shrink(struct radix_tree_root *root,
- radix_tree_update_node_t update_node)
+static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
bool shrunk = false;
for (;;) {
- struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
struct radix_tree_node *child;
if (!radix_tree_is_internal_node(node))
@@ -692,15 +501,20 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
/*
* The candidate node has more than one child, or its child
- * is not at the leftmost slot, or the child is a multiorder
- * entry, we cannot shrink.
+ * is not at the leftmost slot, so we cannot shrink.
*/
if (node->count != 1)
break;
child = rcu_dereference_raw(node->slots[0]);
if (!child)
break;
- if (!radix_tree_is_internal_node(child) && node->shift)
+
+ /*
+ * For an IDR, we must not shrink entry 0 into the root in
+ * case somebody calls idr_replace() with a pointer that
+ * appears to be an internal entry
+ */
+ if (!node->shift && is_idr(root))
break;
if (radix_tree_is_internal_node(child))
@@ -711,9 +525,9 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
* moving the node from one part of the tree to another: if it
* was safe to dereference the old pointer to it
* (node->slots[0]), it will be safe to dereference the new
- * one (root->rnode) as far as dependent read barriers go.
+ * one (root->xa_head) as far as dependent read barriers go.
*/
- root->rnode = (void __rcu *)child;
+ root->xa_head = (void __rcu *)child;
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
root_tag_clear(root, IDR_FREE);
@@ -738,8 +552,6 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
node->count = 0;
if (!radix_tree_is_internal_node(child)) {
node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
- if (update_node)
- update_node(node);
}
WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -751,8 +563,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
}
static bool delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node,
- radix_tree_update_node_t update_node)
+ struct radix_tree_node *node)
{
bool deleted = false;
@@ -761,9 +572,8 @@ static bool delete_node(struct radix_tree_root *root,
if (node->count) {
if (node_to_entry(node) ==
- rcu_dereference_raw(root->rnode))
- deleted |= radix_tree_shrink(root,
- update_node);
+ rcu_dereference_raw(root->xa_head))
+ deleted |= radix_tree_shrink(root);
return deleted;
}
@@ -778,7 +588,7 @@ static bool delete_node(struct radix_tree_root *root,
*/
if (!is_idr(root))
root_tag_clear_all(root);
- root->rnode = NULL;
+ root->xa_head = NULL;
}
WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -795,7 +605,6 @@ static bool delete_node(struct radix_tree_root *root,
* __radix_tree_create - create a slot in a radix tree
* @root: radix tree root
* @index: index key
- * @order: index occupies 2^order aligned slots
* @nodep: returns node
* @slotp: returns slot
*
@@ -803,36 +612,34 @@ static bool delete_node(struct radix_tree_root *root,
* at position @index in the radix tree @root.
*
* Until there is more than one item in the tree, no nodes are
- * allocated and @root->rnode is used as a direct slot instead of
+ * allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*
* Returns -ENOMEM, or 0 for success.
*/
-int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
- unsigned order, struct radix_tree_node **nodep,
- void __rcu ***slotp)
+static int __radix_tree_create(struct radix_tree_root *root,
+ unsigned long index, struct radix_tree_node **nodep,
+ void __rcu ***slotp)
{
struct radix_tree_node *node = NULL, *child;
- void __rcu **slot = (void __rcu **)&root->rnode;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex;
unsigned int shift, offset = 0;
- unsigned long max = index | ((1UL << order) - 1);
+ unsigned long max = index;
gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex);
/* Make sure the tree is high enough. */
- if (order > 0 && max == ((1UL << order) - 1))
- max++;
if (max > maxindex) {
int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0)
return error;
shift = error;
- child = rcu_dereference_raw(root->rnode);
+ child = rcu_dereference_raw(root->xa_head);
}
- while (shift > order) {
+ while (shift > 0) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
@@ -875,8 +682,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
for (;;) {
void *entry = rcu_dereference_raw(child->slots[offset]);
- if (radix_tree_is_internal_node(entry) &&
- !is_sibling_entry(child, entry)) {
+ if (xa_is_node(entry) && child->shift) {
child = entry_to_node(entry);
offset = 0;
continue;
@@ -894,96 +700,30 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, unsigned order, bool replace)
-{
- struct radix_tree_node *child;
- unsigned i, n, tag, offset, tags = 0;
-
- if (node) {
- if (order > node->shift)
- n = 1 << (order - node->shift);
- else
- n = 1;
- offset = get_slot_offset(node, slot);
- } else {
- n = 1;
- offset = 0;
- }
-
- if (n > 1) {
- offset = offset & ~(n - 1);
- slot = &node->slots[offset];
- }
- child = node_to_entry(slot);
-
- for (i = 0; i < n; i++) {
- if (slot[i]) {
- if (replace) {
- node->count--;
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tag_get(node, tag, offset + i))
- tags |= 1 << tag;
- } else
- return -EEXIST;
- }
- }
-
- for (i = 0; i < n; i++) {
- struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
- if (i) {
- rcu_assign_pointer(slot[i], child);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_clear(node, tag, offset + i);
- } else {
- rcu_assign_pointer(slot[i], item);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- }
- if (radix_tree_is_internal_node(old) &&
- !is_sibling_entry(node, old) &&
- (old != RADIX_TREE_RETRY))
- radix_tree_free_nodes(old);
- if (radix_tree_exceptional_entry(old))
- node->exceptional--;
- }
- if (node) {
- node->count += n;
- if (radix_tree_exceptional_entry(item))
- node->exceptional += n;
- }
- return n;
-}
-#else
static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, unsigned order, bool replace)
+ void __rcu **slot, void *item, bool replace)
{
if (*slot)
return -EEXIST;
rcu_assign_pointer(*slot, item);
if (node) {
node->count++;
- if (radix_tree_exceptional_entry(item))
- node->exceptional++;
+ if (xa_is_value(item))
+ node->nr_values++;
}
return 1;
}
-#endif
/**
* __radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
- * @order: key covers the 2^order indices around index
* @item: item to insert
*
* Insert an item into the radix tree at position @index.
*/
-int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
- unsigned order, void *item)
+int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
+ void *item)
{
struct radix_tree_node *node;
void __rcu **slot;
@@ -991,11 +731,11 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
BUG_ON(radix_tree_is_internal_node(item));
- error = __radix_tree_create(root, index, order, &node, &slot);
+ error = __radix_tree_create(root, index, &node, &slot);
if (error)
return error;
- error = insert_entries(node, slot, item, order, false);
+ error = insert_entries(node, slot, item, false);
if (error < 0)
return error;
@@ -1010,7 +750,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
return 0;
}
-EXPORT_SYMBOL(__radix_tree_insert);
+EXPORT_SYMBOL(radix_tree_insert);
/**
* __radix_tree_lookup - lookup an item in a radix tree
@@ -1023,7 +763,7 @@ EXPORT_SYMBOL(__radix_tree_insert);
* tree @root.
*
* Until there is more than one item in the tree, no nodes are
- * allocated and @root->rnode is used as a direct slot instead of
+ * allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*/
void *__radix_tree_lookup(const struct radix_tree_root *root,
@@ -1036,7 +776,7 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
restart:
parent = NULL;
- slot = (void __rcu **)&root->rnode;
+ slot = (void __rcu **)&root->xa_head;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return NULL;
@@ -1049,6 +789,8 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
slot = parent->slots + offset;
+ if (parent->shift == 0)
+ break;
}
if (nodep)
@@ -1100,36 +842,12 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_lookup);
-static inline void replace_sibling_entries(struct radix_tree_node *node,
- void __rcu **slot, int count, int exceptional)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- void *ptr = node_to_entry(slot);
- unsigned offset = get_slot_offset(node, slot) + 1;
-
- while (offset < RADIX_TREE_MAP_SIZE) {
- if (rcu_dereference_raw(node->slots[offset]) != ptr)
- break;
- if (count < 0) {
- node->slots[offset] = NULL;
- node->count--;
- }
- node->exceptional += exceptional;
- offset++;
- }
-#endif
-}
-
static void replace_slot(void __rcu **slot, void *item,
- struct radix_tree_node *node, int count, int exceptional)
+ struct radix_tree_node *node, int count, int values)
{
- if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
- return;
-
- if (node && (count || exceptional)) {
+ if (node && (count || values)) {
node->count += count;
- node->exceptional += exceptional;
- replace_sibling_entries(node, slot, count, exceptional);
+ node->nr_values += values;
}
rcu_assign_pointer(*slot, item);
@@ -1172,37 +890,31 @@ static int calculate_count(struct radix_tree_root *root,
* @node: pointer to tree node
* @slot: pointer to slot in @node
* @item: new item to store in the slot.
- * @update_node: callback for changing leaf nodes
*
* For use with __radix_tree_lookup(). Caller must hold tree write locked
* across slot lookup and replacement.
*/
void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node,
- void __rcu **slot, void *item,
- radix_tree_update_node_t update_node)
+ void __rcu **slot, void *item)
{
void *old = rcu_dereference_raw(*slot);
- int exceptional = !!radix_tree_exceptional_entry(item) -
- !!radix_tree_exceptional_entry(old);
+ int values = !!xa_is_value(item) - !!xa_is_value(old);
int count = calculate_count(root, node, slot, item, old);
/*
- * This function supports replacing exceptional entries and
+ * This function supports replacing value entries and
* deleting entries, but that needs accounting against the
- * node unless the slot is root->rnode.
+ * node unless the slot is root->xa_head.
*/
- WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
- (count || exceptional));
- replace_slot(slot, item, node, count, exceptional);
+ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
+ (count || values));
+ replace_slot(slot, item, node, count, values);
if (!node)
return;
- if (update_node)
- update_node(node);
-
- delete_node(root, node, update_node);
+ delete_node(root, node);
}
/**
@@ -1211,12 +923,12 @@ void __radix_tree_replace(struct radix_tree_root *root,
* @slot: pointer to slot
* @item: new item to store in the slot.
*
- * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
+ * For use with radix_tree_lookup_slot() and
* radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
* across slot lookup and replacement.
*
* NOTE: This cannot be used to switch between non-entries (empty slots),
- * regular entries, and exceptional entries, as that requires accounting
+ * regular entries, and value entries, as that requires accounting
* inside the radix tree node. When switching from one type of entry or
* deleting, use __radix_tree_lookup() and __radix_tree_replace() or
* radix_tree_iter_replace().
@@ -1224,7 +936,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
void radix_tree_replace_slot(struct radix_tree_root *root,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, NULL, slot, item, NULL);
+ __radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
@@ -1234,161 +946,15 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
* @slot: pointer to slot
* @item: new item to store in the slot.
*
- * For use with radix_tree_split() and radix_tree_for_each_slot().
- * Caller must hold tree write locked across split and replacement.
+ * For use with radix_tree_for_each_slot().
+ * Caller must hold tree write locked.
*/
void radix_tree_iter_replace(struct radix_tree_root *root,
const struct radix_tree_iter *iter,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, iter->node, slot, item, NULL);
-}
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/**
- * radix_tree_join - replace multiple entries with one multiorder entry
- * @root: radix tree root
- * @index: an index inside the new entry
- * @order: order of the new entry
- * @item: new entry
- *
- * Call this function to replace several entries with one larger entry.
- * The existing entries are presumed to not need freeing as a result of
- * this call.
- *
- * The replacement entry will have all the tags set on it that were set
- * on any of the entries it is replacing.
- */
-int radix_tree_join(struct radix_tree_root *root, unsigned long index,
- unsigned order, void *item)
-{
- struct radix_tree_node *node;
- void __rcu **slot;
- int error;
-
- BUG_ON(radix_tree_is_internal_node(item));
-
- error = __radix_tree_create(root, index, order, &node, &slot);
- if (!error)
- error = insert_entries(node, slot, item, order, true);
- if (error > 0)
- error = 0;
-
- return error;
-}
-
-/**
- * radix_tree_split - Split an entry into smaller entries
- * @root: radix tree root
- * @index: An index within the large entry
- * @order: Order of new entries
- *
- * Call this function as the first step in replacing a multiorder entry
- * with several entries of lower order. After this function returns,
- * loop over the relevant portion of the tree using radix_tree_for_each_slot()
- * and call radix_tree_iter_replace() to set up each new entry.
- *
- * The tags from this entry are replicated to all the new entries.
- *
- * The radix tree should be locked against modification during the entire
- * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which
- * should prompt RCU walkers to restart the lookup from the root.
- */
-int radix_tree_split(struct radix_tree_root *root, unsigned long index,
- unsigned order)
-{
- struct radix_tree_node *parent, *node, *child;
- void __rcu **slot;
- unsigned int offset, end;
- unsigned n, tag, tags = 0;
- gfp_t gfp = root_gfp_mask(root);
-
- if (!__radix_tree_lookup(root, index, &parent, &slot))
- return -ENOENT;
- if (!parent)
- return -ENOENT;
-
- offset = get_slot_offset(parent, slot);
-
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tag_get(parent, tag, offset))
- tags |= 1 << tag;
-
- for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
- if (!is_sibling_entry(parent,
- rcu_dereference_raw(parent->slots[end])))
- break;
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(parent, tag, end);
- /* rcu_assign_pointer ensures tags are set before RETRY */
- rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
- }
- rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
- parent->exceptional -= (end - offset);
-
- if (order == parent->shift)
- return 0;
- if (order > parent->shift) {
- while (offset < end)
- offset += insert_entries(parent, &parent->slots[offset],
- RADIX_TREE_RETRY, order, true);
- return 0;
- }
-
- node = parent;
-
- for (;;) {
- if (node->shift > order) {
- child = radix_tree_node_alloc(gfp, node, root,
- node->shift - RADIX_TREE_MAP_SHIFT,
- offset, 0, 0);
- if (!child)
- goto nomem;
- if (node != parent) {
- node->count++;
- rcu_assign_pointer(node->slots[offset],
- node_to_entry(child));
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- }
-
- node = child;
- offset = 0;
- continue;
- }
-
- n = insert_entries(node, &node->slots[offset],
- RADIX_TREE_RETRY, order, false);
- BUG_ON(n > RADIX_TREE_MAP_SIZE);
-
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- offset += n;
-
- while (offset == RADIX_TREE_MAP_SIZE) {
- if (node == parent)
- break;
- offset = node->offset;
- child = node;
- node = node->parent;
- rcu_assign_pointer(node->slots[offset],
- node_to_entry(child));
- offset++;
- }
- if ((node == parent) && (offset == end))
- return 0;
- }
-
- nomem:
- /* Shouldn't happen; did user forget to preload? */
- /* TODO: free all the allocated nodes */
- WARN_ON(1);
- return -ENOMEM;
+ __radix_tree_replace(root, iter->node, slot, item);
}
-#endif
static void node_tag_set(struct radix_tree_root *root,
struct radix_tree_node *node,
@@ -1447,18 +1013,6 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_set);
-/**
- * radix_tree_iter_tag_set - set a tag on the current iterator entry
- * @root: radix tree root
- * @iter: iterator state
- * @tag: tag to set
- */
-void radix_tree_iter_tag_set(struct radix_tree_root *root,
- const struct radix_tree_iter *iter, unsigned int tag)
-{
- node_tag_set(root, iter->node, tag, iter_offset(iter));
-}
-
static void node_tag_clear(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
@@ -1574,14 +1128,6 @@ int radix_tree_tag_get(const struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_get);
-static inline void __set_iter_shift(struct radix_tree_iter *iter,
- unsigned int shift)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- iter->shift = shift;
-#endif
-}
-
/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
struct radix_tree_node *node, unsigned offset,
@@ -1608,92 +1154,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
}
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-static void __rcu **skip_siblings(struct radix_tree_node **nodep,
- void __rcu **slot, struct radix_tree_iter *iter)
-{
- while (iter->index < iter->next_index) {
- *nodep = rcu_dereference_raw(*slot);
- if (*nodep && !is_sibling_entry(iter->node, *nodep))
- return slot;
- slot++;
- iter->index = __radix_tree_iter_add(iter, 1);
- iter->tags >>= 1;
- }
-
- *nodep = NULL;
- return NULL;
-}
-
-void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags)
-{
- unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
- struct radix_tree_node *node;
-
- slot = skip_siblings(&node, slot, iter);
-
- while (radix_tree_is_internal_node(node)) {
- unsigned offset;
- unsigned long next_index;
-
- if (node == RADIX_TREE_RETRY)
- return slot;
- node = entry_to_node(node);
- iter->node = node;
- iter->shift = node->shift;
-
- if (flags & RADIX_TREE_ITER_TAGGED) {
- offset = radix_tree_find_next_bit(node, tag, 0);
- if (offset == RADIX_TREE_MAP_SIZE)
- return NULL;
- slot = &node->slots[offset];
- iter->index = __radix_tree_iter_add(iter, offset);
- set_iter_tags(iter, node, offset, tag);
- node = rcu_dereference_raw(*slot);
- } else {
- offset = 0;
- slot = &node->slots[0];
- for (;;) {
- node = rcu_dereference_raw(*slot);
- if (node)
- break;
- slot++;
- offset++;
- if (offset == RADIX_TREE_MAP_SIZE)
- return NULL;
- }
- iter->index = __radix_tree_iter_add(iter, offset);
- }
- if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
- goto none;
- next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
- if (next_index < iter->next_index)
- iter->next_index = next_index;
- }
-
- return slot;
- none:
- iter->next_index = 0;
- return NULL;
-}
-EXPORT_SYMBOL(__radix_tree_next_slot);
-#else
-static void __rcu **skip_siblings(struct radix_tree_node **nodep,
- void __rcu **slot, struct radix_tree_iter *iter)
-{
- return slot;
-}
-#endif
-
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- struct radix_tree_node *node;
-
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
- skip_siblings(&node, slot, iter);
iter->next_index = iter->index;
iter->tags = 0;
return NULL;
@@ -1744,8 +1209,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
iter->next_index = maxindex + 1;
iter->tags = 1;
iter->node = NULL;
- __set_iter_shift(iter, 0);
- return (void __rcu **)&root->rnode;
+ return (void __rcu **)&root->xa_head;
}
do {
@@ -1765,8 +1229,6 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
while (++offset < RADIX_TREE_MAP_SIZE) {
void *slot = rcu_dereference_raw(
node->slots[offset]);
- if (is_sibling_entry(node, slot))
- continue;
if (slot)
break;
}
@@ -1784,13 +1246,12 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
goto restart;
if (child == RADIX_TREE_RETRY)
break;
- } while (radix_tree_is_internal_node(child));
+ } while (node->shift && radix_tree_is_internal_node(child));
/* Update the iterator state */
- iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
+ iter->index = (index &~ node_maxindex(node)) | offset;
iter->next_index = (index | node_maxindex(node)) + 1;
iter->node = node;
- __set_iter_shift(iter, node->shift);
if (flags & RADIX_TREE_ITER_TAGGED)
set_iter_tags(iter, node, offset, tag);
@@ -1847,48 +1308,6 @@ radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
- * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
- * @root: radix tree root
- * @results: where the results of the lookup are placed
- * @indices: where their indices should be placed (but usually NULL)
- * @first_index: start the lookup from this key
- * @max_items: place up to this many items at *results
- *
- * Performs an index-ascending scan of the tree for present items. Places
- * their slots at *@results and returns the number of items which were
- * placed at *@results.
- *
- * The implementation is naive.
- *
- * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
- * be dereferenced with radix_tree_deref_slot, and if using only RCU
- * protection, radix_tree_deref_slot may fail requiring a retry.
- */
-unsigned int
-radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
- void __rcu ***results, unsigned long *indices,
- unsigned long first_index, unsigned int max_items)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned int ret = 0;
-
- if (unlikely(!max_items))
- return 0;
-
- radix_tree_for_each_slot(slot, root, &iter, first_index) {
- results[ret] = slot;
- if (indices)
- indices[ret] = iter.index;
- if (++ret == max_items)
- break;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
-
-/**
* radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
* based on a tag
* @root: radix tree root
@@ -1964,28 +1383,11 @@ radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
-/**
- * __radix_tree_delete_node - try to free node after clearing a slot
- * @root: radix tree root
- * @node: node containing @index
- * @update_node: callback for changing leaf nodes
- *
- * After clearing the slot at @index in @node from radix tree
- * rooted at @root, call this function to attempt freeing the
- * node and shrinking the tree.
- */
-void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node,
- radix_tree_update_node_t update_node)
-{
- delete_node(root, node, update_node);
-}
-
static bool __radix_tree_delete(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot)
{
void *old = rcu_dereference_raw(*slot);
- int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
+ int values = xa_is_value(old) ? -1 : 0;
unsigned offset = get_slot_offset(node, slot);
int tag;
@@ -1995,8 +1397,8 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
- replace_slot(slot, NULL, node, -1, exceptional);
- return node && delete_node(root, node, NULL);
+ replace_slot(slot, NULL, node, -1, values);
+ return node && delete_node(root, node);
}
/**
@@ -2068,19 +1470,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_delete);
-void radix_tree_clear_tags(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void __rcu **slot)
-{
- if (node) {
- unsigned int tag, offset = get_slot_offset(node, slot);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- node_tag_clear(root, node, tag, offset);
- } else {
- root_tag_clear_all(root);
- }
-}
-
/**
* radix_tree_tagged - test whether any items in the tree are tagged
* @root: radix tree root
@@ -2106,42 +1495,12 @@ void idr_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_preload);
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp: memory allocation flags
- *
- * This function should be called before calling ida_get_new_above(). If it
- * is unable to allocate memory, it will return %0. On success, it returns %1.
- */
-int ida_pre_get(struct ida *ida, gfp_t gfp)
-{
- /*
- * The IDA API has no preload_end() equivalent. Instead,
- * ida_get_new() can return -EAGAIN, prompting the caller
- * to return to the ida_pre_get() step.
- */
- if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
- preempt_enable();
-
- if (!this_cpu_read(ida_bitmap)) {
- struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
- if (!bitmap)
- return 0;
- if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
- kfree(bitmap);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(ida_pre_get);
-
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
{
struct radix_tree_node *node = NULL, *child;
- void __rcu **slot = (void __rcu **)&root->rnode;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex, start = iter->next_index;
unsigned int shift, offset = 0;
@@ -2157,8 +1516,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
if (error < 0)
return ERR_PTR(error);
shift = error;
- child = rcu_dereference_raw(root->rnode);
+ child = rcu_dereference_raw(root->xa_head);
}
+ if (start == 0 && shift == 0)
+ shift = RADIX_TREE_MAP_SHIFT;
while (shift) {
shift -= RADIX_TREE_MAP_SHIFT;
@@ -2201,7 +1562,6 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
else
iter->next_index = 1;
iter->node = node;
- __set_iter_shift(iter, shift);
set_iter_tags(iter, node, offset, IDR_FREE);
return slot;
@@ -2220,10 +1580,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
*/
void idr_destroy(struct idr *idr)
{
- struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
if (radix_tree_is_internal_node(node))
radix_tree_free_nodes(node);
- idr->idr_rt.rnode = NULL;
+ idr->idr_rt.xa_head = NULL;
root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
@@ -2237,31 +1597,6 @@ radix_tree_node_ctor(void *arg)
INIT_LIST_HEAD(&node->private_list);
}
-static __init unsigned long __maxindex(unsigned int height)
-{
- unsigned int width = height * RADIX_TREE_MAP_SHIFT;
- int shift = RADIX_TREE_INDEX_BITS - width;
-
- if (shift < 0)
- return ~0UL;
- if (shift >= BITS_PER_LONG)
- return 0UL;
- return ~0UL >> shift;
-}
-
-static __init void radix_tree_init_maxnodes(void)
-{
- unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
- unsigned int i, j;
-
- for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
- height_to_maxindex[i] = __maxindex(i);
- for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
- for (j = i; j > 0; j--)
- height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
- }
-}
-
static int radix_tree_cpu_dead(unsigned int cpu)
{
struct radix_tree_preload *rtp;
@@ -2275,8 +1610,6 @@ static int radix_tree_cpu_dead(unsigned int cpu)
kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--;
}
- kfree(per_cpu(ida_bitmap, cpu));
- per_cpu(ida_bitmap, cpu) = NULL;
return 0;
}
@@ -2286,11 +1619,11 @@ void __init radix_tree_init(void)
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
+ BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
- radix_tree_init_maxnodes();
ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
NULL, radix_tree_cpu_dead);
WARN_ON(ret < 0);
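
The ida_pre_get() removal above retires the preload-and-retry idiom its (now deleted) kerneldoc describes. For context only, a minimal sketch of that old pattern, assuming the pre-conversion IDA API (ida_pre_get()/ida_get_new_above(), since superseded by ida_alloc()/ida_free()); the wrapper below is made up and is not part of this patch:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);		/* illustrative instance */

static int old_style_get_id(void)
{
	int id, err;

	do {
		/* ida_pre_get() returned 1 on success, 0 on allocation failure */
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;
		/* -EAGAIN meant "preload again and retry" */
		err = ida_get_new_above(&example_ida, 0, &id);
	} while (err == -EAGAIN);

	return err ? err : id;
}
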
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 140fa8bb5c23..914ebe98fc21 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
- STORE_DATA(0,$#,&p[d]);
- STORE_DATA(8,$#,&q[d]);
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
- LOAD_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,&p[d]);
XOR(16+$$,16+$$,0+$$);
- STORE_DATA(16,$#,&p[d]);
- LOAD_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
XOR(16+$$,16+$$,8+$$);
- STORE_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index fcb4ce682c6f..bf043258fa00 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
return R;
}
EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+ struct reciprocal_value_adv R;
+ u32 l, post_shift;
+ u64 mhigh, mlow;
+
+ /* ceil(log2(d)) */
+ l = fls(d - 1);
+ /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+ * be handled before calling "reciprocal_value_adv"; please see the
+ * comment at include/linux/reciprocal_div.h.
+ */
+ WARN(l == 32,
+ "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+ d, __func__);
+ post_shift = l;
+ mlow = 1ULL << (32 + l);
+ do_div(mlow, d);
+ mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+ do_div(mhigh, d);
+
+ for (; post_shift > 0; post_shift--) {
+ u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+ if (lo >= hi)
+ break;
+
+ mlow = lo;
+ mhigh = hi;
+ }
+
+ R.m = (u32)mhigh;
+ R.sh = post_shift;
+ R.exp = l;
+ R.is_wide_m = mhigh > U32_MAX;
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
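
reciprocal_value_adv() above extends reciprocal_value() to divisors that are only known at run time, producing a multiply-and-shift pair in the Granlund/Montgomery style. A hypothetical caller-side sketch, assuming the reciprocal was computed with prec == 32 and that R->is_wide_m is false (the wide-m case needs the longer sequence described in include/linux/reciprocal_div.h); the helper name is made up:

#include <linux/reciprocal_div.h>

static u32 div_by_rec_adv(u32 a, const struct reciprocal_value_adv *R)
{
	/* quotient = floor(a * m / 2^(32 + sh)) for the non-wide case */
	return (u32)(((u64)a * R->m) >> 32) >> R->sh;
}
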
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index d8bb1a1eba72..e5fdc8b9e856 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -283,7 +283,7 @@ out:
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
- * @gfp: GFP_ flags for allocations
+ * @gfp: Memory allocation flags.
*/
struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
int nroots, gfp_t gfp)
diff --git a/lib/refcount.c b/lib/refcount.c
index d3b81cefce91..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
*
*/
+#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -58,7 +58,7 @@
*
* Return: false if the passed refcount is 0, true otherwise
*/
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
{
- WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
/**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
*
* Return: true if the increment was successful, false otherwise
*/
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
/**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
{
- WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
/**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
/**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return refcount_sub_and_test_checked(1, r);
}
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
/**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
{
- WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
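
The renames above keep the always-checked implementations available as refcount_*_checked() regardless of CONFIG_REFCOUNT_FULL, so the plain refcount_*() names can map either to these or to faster unchecked/arch-specific versions. As a reminder of the semantics being guarded (saturation at UINT_MAX, WARN on increment-from-zero and on underflow), a minimal usage sketch; the structure and function names are made up:

#include <linux/refcount.h>
#include <linux/slab.h>

struct blob {
	refcount_t ref;
	char data[64];
};

static struct blob *blob_get(struct blob *b)
{
	/* WARNs and does nothing if the count has already hit zero */
	refcount_inc(&b->ref);
	return b;
}

static void blob_put(struct blob *b)
{
	/* Returns true only for the final reference */
	if (refcount_dec_and_test(&b->ref))
		kfree(b);
}
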
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e5c8586cf717..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -115,8 +115,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ bool leaf)
{
union nested_table *ntbl;
int i;
@@ -127,10 +126,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) {
- for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +154,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ false)) {
kfree(tbl);
return NULL;
}
@@ -175,17 +173,15 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- if (gfp != GFP_KERNEL)
- tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
- else
- tbl = kvzalloc(size, gfp);
+ tbl = kvzalloc(size, gfp);
size = nbuckets;
- if (tbl == NULL && gfp != GFP_KERNEL) {
+ if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
+
if (tbl == NULL)
return NULL;
@@ -206,7 +202,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
@@ -227,8 +223,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct bucket_table *new_tbl = rhashtable_last_table(ht,
- rht_dereference_rcu(old_tbl->future_tbl, ht));
+ struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
int err = -EAGAIN;
struct rhash_head *head, *next, *entry;
@@ -298,21 +293,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
struct bucket_table *old_tbl,
struct bucket_table *new_tbl)
{
- /* Protect future_tbl using the first bucket lock. */
- spin_lock_bh(old_tbl->locks);
-
- /* Did somebody beat us to it? */
- if (rcu_access_pointer(old_tbl->future_tbl)) {
- spin_unlock_bh(old_tbl->locks);
- return -EEXIST;
- }
-
/* Make insertions go into the new, empty table right away. Deletions
* and lookups will be attempted in both tables until we synchronize.
+ * As cmpxchg() provides strong barriers, we do not need
+ * rcu_assign_pointer().
*/
- rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- spin_unlock_bh(old_tbl->locks);
+ if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+ return -EEXIST;
return 0;
}
@@ -459,7 +447,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
err = -ENOMEM;
- new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+ new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
if (new_tbl == NULL)
goto fail;
@@ -475,7 +463,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
fail:
/* Do not fail the insert if someone else did a rehash. */
- if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ if (likely(rcu_access_pointer(tbl->future_tbl)))
return 0;
/* Schedule async rehash to retry allocation in process context. */
@@ -548,7 +536,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
return ERR_CAST(data);
- new_tbl = rcu_dereference(tbl->future_tbl);
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (new_tbl)
return new_tbl;
@@ -607,7 +595,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
break;
spin_unlock_bh(lock);
- tbl = rcu_dereference(tbl->future_tbl);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
}
data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -1002,7 +990,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = jhash,
- * .nulls_base = (1U << RHT_BASE_SHIFT),
* };
*
* Configuration Example 2: Variable length keys
@@ -1034,9 +1021,6 @@ int rhashtable_init(struct rhashtable *ht,
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
- if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
- return -EINVAL;
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -1073,9 +1057,16 @@ int rhashtable_init(struct rhashtable *ht,
}
}
+ /*
+ * This is API initialization and thus we need to guarantee the
+ * initial rhashtable allocation. Upon failure, retry with the
+ * smallest possible size with __GFP_NOFAIL semantics.
+ */
tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
- if (tbl == NULL)
- return -ENOMEM;
+ if (unlikely(tbl == NULL)) {
+ size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
+ }
atomic_set(&ht->nelems, 0);
@@ -1100,10 +1091,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- /* No rhlist NULLs marking for now. */
- if (params->nulls_base)
- return -EINVAL;
-
err = rhashtable_init(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
@@ -1227,25 +1214,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- unsigned int shifted;
- unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
hash >>= tbl->nest;
- nhash = index;
- shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift));
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
- shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift));
}
if (!ntbl)
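
The future_tbl change above replaces the lock-protected assignment with a single cmpxchg(): the first rehasher to install the new table wins, later attempts see -EEXIST, and the ordering implied by cmpxchg() makes a separate rcu_assign_pointer() unnecessary. A minimal sketch of that publish-once pattern with illustrative names (not rhashtable code):

#include <linux/atomic.h>
#include <linux/errno.h>

struct child;

struct parent {
	struct child *future;
};

static int publish_child(struct parent *p, struct child *c)
{
	/*
	 * cmpxchg() orders the initialisation of *c before the pointer
	 * becomes visible; only the first caller succeeds.
	 */
	if (cmpxchg(&p->future, NULL, c) != NULL)
		return -EEXIST;
	return 0;
}
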
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
cleanup_sdb:
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct sg_pool *sgp = sg_pools + i;
- if (sgp->pool)
- mempool_destroy(sgp->pool);
- if (sgp->slab)
- kmem_cache_destroy(sgp->slab);
+
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
}
return -ENOMEM;
diff --git a/lib/string.c b/lib/string.c
index 2c0900a5d51a..38e4ca08e757 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
diff --git a/lib/test_bitfield.c b/lib/test_bitfield.c
new file mode 100644
index 000000000000..5b8f4108662d
--- /dev/null
+++ b/lib/test_bitfield.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ if (_res != res) { \
+ pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
+ (u64)_res); \
+ return -EINVAL; \
+ } \
+ if (u##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_le##tp(res)) { \
+ pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (le##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_be##tp(res)) { \
+ pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (be##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static int test_constants(void)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+
+ return 0;
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
+ return -EINVAL; \
+ } while (0)
+
+static int test_variables(void)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+
+ return 0;
+}
+
+static int __init test_bitfields(void)
+{
+ int ret = test_constants();
+
+ if (ret) {
+ pr_warn("constant tests failed!\n");
+ return ret;
+ }
+
+ ret = test_variables();
+ if (ret) {
+ pr_warn("variable tests failed!\n");
+ return ret;
+ }
+
+#ifdef TEST_BITFIELD_COMPILE
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+#endif
+
+ pr_info("tests passed\n");
+
+ return 0;
+}
+module_init(test_bitfields)
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
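
The new self-test exercises the <linux/bitfield.h> helpers (u8/u16/u32/u64 plus the __le/__be variants). A short usage sketch; the register layout and names below are made up for illustration:

#include <linux/bitfield.h>

#define CTRL_MODE	0x0000000fU	/* bits 3:0 */
#define CTRL_CHANNEL	0x00000ff0U	/* bits 11:4 */

static u32 pack_ctrl(u32 mode, u32 channel)
{
	/* Each value is masked and shifted into place under its field mask */
	return u32_encode_bits(mode, CTRL_MODE) |
	       u32_encode_bits(channel, CTRL_CHANNEL);
}

static u32 ctrl_channel(u32 reg)
{
	return u32_get_bits(reg, CTRL_CHANNEL);
}
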
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 08d3d59dca17..aa22bcaec1dc 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6494,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
skb->vlan_proto = htons(ETH_P_IP);
+ dev_net_set(&dev, &init_net);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index b9cdeecc19dc..d5a06addeb27 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -15,7 +15,7 @@ struct foo {
unsigned int bar;
};
-struct foo *foo;
+static struct foo *foo;
static int __init test_debug_virtual_init(void)
{
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 3f415d8101f3..626f580b4ff7 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -18,7 +18,7 @@ static const unsigned char data_b[] = {
static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
-static const char * const test_data_1_le[] __initconst = {
+static const char * const test_data_1[] __initconst = {
"be", "32", "db", "7b", "0a", "18", "93", "b2",
"70", "ba", "c4", "24", "7d", "83", "34", "9b",
"a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
@@ -32,16 +32,33 @@ static const char * const test_data_2_le[] __initconst = {
"d14c", "9919", "b143", "0caf",
};
+static const char * const test_data_2_be[] __initconst = {
+ "be32", "db7b", "0a18", "93b2",
+ "70ba", "c424", "7d83", "349b",
+ "a69c", "31ad", "9c0f", "ace9",
+ "4cd1", "1999", "43b1", "af0c",
+};
+
static const char * const test_data_4_le[] __initconst = {
"7bdb32be", "b293180a", "24c4ba70", "9b34837d",
"ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
};
+static const char * const test_data_4_be[] __initconst = {
+ "be32db7b", "0a1893b2", "70bac424", "7d83349b",
+ "a69c31ad", "9c0face9", "4cd11999", "43b1af0c",
+};
+
static const char * const test_data_8_le[] __initconst = {
"b293180a7bdb32be", "9b34837d24c4ba70",
"e9ac0f9cad319ca6", "0cafb1439919d14c",
};
+static const char * const test_data_8_be[] __initconst = {
+ "be32db7b0a1893b2", "70bac4247d83349b",
+ "a69c31ad9c0face9", "4cd1199943b1af0c",
+};
+
#define FILL_CHAR '#'
static unsigned total_tests __initdata;
@@ -56,6 +73,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
size_t l = len;
int gs = groupsize, rs = rowsize;
unsigned int i;
+ const bool is_be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
if (rs != 16 && rs != 32)
rs = 16;
@@ -67,13 +85,13 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
gs = 1;
if (gs == 8)
- result = test_data_8_le;
+ result = is_be ? test_data_8_be : test_data_8_le;
else if (gs == 4)
- result = test_data_4_le;
+ result = is_be ? test_data_4_be : test_data_4_le;
else if (gs == 2)
- result = test_data_2_le;
+ result = is_be ? test_data_2_be : test_data_2_le;
else
- result = test_data_1_le;
+ result = test_data_1;
/* hex dump */
p = test;
diff --git a/lib/test_ida.c b/lib/test_ida.c
new file mode 100644
index 000000000000..b06880625961
--- /dev/null
+++ b/lib/test_ida.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_ida.c: Test the IDA API
+ * Copyright (c) 2016-2018 Microsoft Corporation
+ * Copyright (c) 2018 Oracle Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifdef __KERNEL__
+void ida_dump(struct ida *ida) { }
+#endif
+#define IDA_BUG_ON(ida, x) do { \
+ tests_run++; \
+ if (x) { \
+ ida_dump(ida); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+
+/*
+ * Straightforward checks that allocating and freeing IDs work.
+ */
+static void ida_check_alloc(struct ida *ida)
+{
+ int i, id;
+
+ for (i = 0; i < 10000; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+
+ ida_free(ida, 20);
+ ida_free(ida, 21);
+ for (i = 0; i < 3; i++) {
+ id = ida_alloc(ida, GFP_KERNEL);
+ IDA_BUG_ON(ida, id < 0);
+ if (i == 2)
+ IDA_BUG_ON(ida, id != 10000);
+ }
+
+ for (i = 0; i < 5000; i++)
+ ida_free(ida, i);
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
+ ida_destroy(ida);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Destroy an IDA with a single entry at @base */
+static void ida_check_destroy_1(struct ida *ida, unsigned int base)
+{
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Check that ida_destroy and ida_is_empty work */
+static void ida_check_destroy(struct ida *ida)
+{
+ /* Destroy an already-empty IDA */
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ ida_check_destroy_1(ida, 0);
+ ida_check_destroy_1(ida, 1);
+ ida_check_destroy_1(ida, 1023);
+ ida_check_destroy_1(ida, 1024);
+ ida_check_destroy_1(ida, 12345678);
+}
+
+/*
+ * Check what happens when we fill a leaf and then delete it. This may
+ * discover mishandling of IDR_FREE.
+ */
+static void ida_check_leaf(struct ida *ida, unsigned int base)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_free(ida, 0);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
+ * Allocating up to 2^31-1 should succeed, and then allocating the next one
+ * should fail.
+ */
+static void ida_check_max(struct ida *ida)
+{
+ unsigned long i, j;
+
+ for (j = 1; j < 65537; j *= 2) {
+ unsigned long base = (1UL << 31) - j;
+ for (i = 0; i < j; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ -ENOSPC);
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+}
+
+/*
+ * Check handling of conversions between exceptional entries and full bitmaps.
+ */
+static void ida_check_conv(struct ida *ida)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
+ GFP_KERNEL) != i + BITS_PER_LONG);
+ ida_free(ida, i + 1);
+ ida_free(ida, i + BITS_PER_LONG);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+static DEFINE_IDA(ida);
+
+static int ida_checks(void)
+{
+ IDA_BUG_ON(&ida, !ida_is_empty(&ida));
+ ida_check_alloc(&ida);
+ ida_check_destroy(&ida);
+ ida_check_leaf(&ida, 0);
+ ida_check_leaf(&ida, 1024);
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
+}
+
+static void ida_exit(void)
+{
+}
+
+module_init(ida_checks);
+module_exit(ida_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
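
test_ida.c targets the simplified allocator interface (ida_alloc(), ida_alloc_min(), ida_free(), ida_destroy(), ida_is_empty()), which needs neither preloading nor an external lock. A minimal usage sketch; the "session" naming is illustrative only:

#include <linux/idr.h>

static DEFINE_IDA(session_ida);

static int session_open(void)
{
	/* Smallest free ID starting from 0, or a negative errno */
	return ida_alloc(&session_ida, GFP_KERNEL);
}

static void session_close(int id)
{
	ida_free(&session_ida, id);
}
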
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ec657105edbf..51b78405bf24 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -579,6 +579,73 @@ static noinline void __init kmem_cache_invalid_free(void)
kmem_cache_destroy(cache);
}
+static noinline void __init kasan_memchr(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("out-of-bounds in memchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memchr(ptr, '1', size + 1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_memcmp(void)
+{
+ char *ptr;
+ size_t size = 24;
+ int arr[9];
+
+ pr_info("out-of-bounds in memcmp\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memset(arr, 0, sizeof(arr));
+ memcmp(ptr, arr, size+1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_strings(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("use-after-free in strchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ kfree(ptr);
+
+ /*
+ * Try to cause only 1 invalid access (less spam in dmesg).
+ * For that we need ptr to point to a zeroed byte.
+ * Skip metadata that could be stored in the freed object so ptr
+ * will likely point to a zeroed byte.
+ */
+ ptr += 16;
+ strchr(ptr, '1');
+
+ pr_info("use-after-free in strrchr\n");
+ strrchr(ptr, '1');
+
+ pr_info("use-after-free in strcmp\n");
+ strcmp(ptr, "2");
+
+ pr_info("use-after-free in strncmp\n");
+ strncmp(ptr, "2", 1);
+
+ pr_info("use-after-free in strlen\n");
+ strlen(ptr);
+
+ pr_info("use-after-free in strnlen\n");
+ strnlen(ptr, 1);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -618,6 +685,9 @@ static int __init kmalloc_tests_init(void)
use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
+ kasan_memchr();
+ kasan_memcmp();
+ kasan_strings();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000000..849c477d49d0
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcat_p() in lib/memcat_p.c
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct test_struct {
+ int num;
+ unsigned int magic;
+};
+
+#define MAGIC 0xf00ff00f
+/* Size of each of the NULL-terminated input arrays */
+#define INPUT_MAX 128
+/* Expected number of non-NULL elements in the output array */
+#define EXPECT (INPUT_MAX * 2 - 2)
+
+static int __init test_memcat_p_init(void)
+{
+ struct test_struct **in0, **in1, **out, **p;
+ int err = -ENOMEM, i, r, total = 0;
+
+ in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
+ if (!in0)
+ return err;
+
+ in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
+ if (!in1)
+ goto err_free_in0;
+
+ for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
+ in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
+ if (!in0[i])
+ goto err_free_elements;
+
+ in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
+ if (!in1[i]) {
+ kfree(in0[i]);
+ goto err_free_elements;
+ }
+
+ /* lifted from test_sort.c */
+ r = (r * 725861) % 6599;
+ in0[i]->num = r;
+ in1[i]->num = -r;
+ in0[i]->magic = MAGIC;
+ in1[i]->magic = MAGIC;
+ }
+
+ in0[i] = in1[i] = NULL;
+
+ out = memcat_p(in0, in1);
+ if (!out)
+ goto err_free_all_elements;
+
+ err = -EINVAL;
+ for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
+ total += (*p)->num;
+
+ if ((*p)->magic != MAGIC) {
+ pr_err("test failed: wrong magic at %d: %u\n", i,
+ (*p)->magic);
+ goto err_free_out;
+ }
+ }
+
+ if (total) {
+ pr_err("test failed: expected zero total, got %d\n", total);
+ goto err_free_out;
+ }
+
+ if (i != EXPECT) {
+ pr_err("test failed: expected output size %d, got %d\n",
+ EXPECT, i);
+ goto err_free_out;
+ }
+
+ for (i = 0; i < INPUT_MAX - 1; i++)
+ if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
+ pr_err("test failed: wrong element order at %d\n", i);
+ goto err_free_out;
+ }
+
+ err = 0;
+ pr_info("test passed\n");
+
+err_free_out:
+ kfree(out);
+err_free_all_elements:
+ i = INPUT_MAX;
+err_free_elements:
+ for (i--; i >= 0; i--) {
+ kfree(in1[i]);
+ kfree(in0[i]);
+ }
+
+ kfree(in1);
+err_free_in0:
+ kfree(in0);
+
+ return err;
+}
+
+static void __exit test_memcat_p_exit(void)
+{
+}
+
+module_init(test_memcat_p_init);
+module_exit(test_memcat_p_exit);
+
+MODULE_LICENSE("GPL");
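
memcat_p() (lib/memcat_p.c in this series) concatenates two NULL-terminated arrays of pointers into one newly allocated, NULL-terminated array, which is what the checks above verify; the caller kfree()s the result. A tiny usage sketch with made-up names:

#include <linux/string.h>

static char **merge_string_lists(char **a, char **b)
{
	/* Returns NULL on allocation failure; the inputs are left untouched */
	return memcat_p(a, b);
}
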
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
index 2278fe05a1b0..fc680562d8b6 100644
--- a/lib/test_overflow.c
+++ b/lib/test_overflow.c
@@ -252,7 +252,8 @@ static int __init test_ ## t ## _overflow(void) { \
int err = 0; \
unsigned i; \
\
- pr_info("%-3s: %zu tests\n", #t, ARRAY_SIZE(t ## _tests)); \
+ pr_info("%-3s: %zu arithmetic tests\n", #t, \
+ ARRAY_SIZE(t ## _tests)); \
for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
err |= do_test_ ## t(&t ## _tests[i]); \
return err; \
@@ -287,6 +288,200 @@ static int __init test_overflow_calculation(void)
return err;
}
+static int __init test_overflow_shift(void)
+{
+ int err = 0;
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
+ int __failed = 0; \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ __failed = 1; \
+ } else if (!__of && __d != __e) { \
+ pr_warn("expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ pr_warn("got %lld\n", (s64)__d); \
+ else \
+ pr_warn("got %llu\n", (u64)__d); \
+ __failed = 1; \
+ } \
+ if (!__failed) \
+ pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
+ of ? "overflow" : #expect); \
+ __failed; \
+})
+
+ /* Sane shifts. */
+ err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
+ 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ /* Overflow: shifted the bit off the end. */
+ err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ /* Overflow: values larger than destination type. */
+ err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Nonsense: negative initial value. */
+ err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ return err;
+}
+
/*
* Deal with the various forms of allocator arguments. See comments above
* the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
@@ -397,6 +592,7 @@ static int __init test_module_init(void)
int err = 0;
err |= test_overflow_calculation();
+ err |= test_overflow_shift();
err |= test_overflow_allocation();
if (err) {
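
The new shift tests cover check_shl_overflow() from <linux/overflow.h>, which stores a << s in *d and returns true when the shifted value would not fit the destination type (or the inputs are nonsensical, e.g. negative). A caller-side sketch with made-up names:

#include <linux/overflow.h>
#include <linux/errno.h>

static int pages_to_bytes(unsigned long npages, unsigned int page_shift,
			  unsigned long *bytes)
{
	/* Only trust *bytes when no overflow was reported */
	if (check_shl_overflow(npages, page_shift, bytes))
		return -EOVERFLOW;
	return 0;
}
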
diff --git a/lib/test_printf.c b/lib/test_printf.c
index cea592f402ed..53527ea822b5 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -206,6 +206,7 @@ test_string(void)
#define PTR_WIDTH 16
#define PTR ((void *)0xffff0123456789abUL)
#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
static int __init
@@ -216,7 +217,16 @@ plain_format(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
return -1;
return 0;
@@ -227,6 +237,7 @@ plain_format(void)
#define PTR_WIDTH 8
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
static int __init
plain_format(void)
@@ -245,7 +256,16 @@ plain_hash(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
return -1;
return 0;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index fb6968109113..82ac39ce5310 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
const struct test_obj_rhl *obj = data;
- return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+ return (obj->value.id % 10);
}
static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
- .nulls_base = (3U << RHT_BASE_SHIFT),
};
static struct rhashtable_params test_rht_params_dup = {
@@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries)
if (!obj_in_table)
goto out_free;
- /* nulls_base not supported in rhlist interface */
- test_rht_params.nulls_base = 0;
err = rhltable_init(&rhlt, &test_rht_params);
if (WARN_ON(err))
goto out_free;
@@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
unsigned int i, cnt = 0;
ht = &rhlt->ht;
+ /* Take the mutex to avoid RCU warning */
+ mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
@@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+ mutex_unlock(&ht->mutex);
return cnt;
}
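
The largest addition in this set is lib/test_xarray.c below, which exercises the normal XArray API (xa_load(), xa_store(), xa_erase(), marks, multi-index entries) as well as the advanced xa_state interface. For orientation, a minimal sketch of the basic calls those tests build on, with made-up names; entries are either 4-byte-aligned kernel pointers or xa_mk_value() integers:

#include <linux/xarray.h>
#include <linux/errno.h>

struct thing;			/* opaque payload, illustrative */

static DEFINE_XARRAY(things);

static int thing_add(unsigned long index, struct thing *t)
{
	/* xa_store() returns the old entry or an xa_err()-encoded error */
	return xa_err(xa_store(&things, index, t, GFP_KERNEL));
}

static struct thing *thing_find(unsigned long index)
{
	return xa_load(&things, index);
}

static void thing_del(unsigned long index)
{
	xa_erase(&things, index);
}
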
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
new file mode 100644
index 000000000000..aa47754150ce
--- /dev/null
+++ b/lib/test_xarray.c
@@ -0,0 +1,1238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_xarray.c: Test the XArray API
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/xarray.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifndef XA_DEBUG
+# ifdef __KERNEL__
+void xa_dump(const struct xarray *xa) { }
+# endif
+#undef XA_BUG_ON
+#define XA_BUG_ON(xa, x) do { \
+ tests_run++; \
+ if (x) { \
+ printk("BUG at %s:%d\n", __func__, __LINE__); \
+ xa_dump(xa); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+#endif
+
+static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ u32 id = 0;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+ gfp) != 0);
+ XA_BUG_ON(xa, id != index);
+}
+
+static void xa_erase_index(struct xarray *xa, unsigned long index)
+{
+ XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+ XA_BUG_ON(xa, xa_load(xa, index) != NULL);
+}
+
+/*
+ * If anyone needs this, please move it to xarray.c. We have no current
+ * users outside the test suite because all current multislot users want
+ * to use the advanced API.
+ */
+static void *xa_store_order(struct xarray *xa, unsigned long index,
+ unsigned order, void *entry, gfp_t gfp)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ void *curr;
+
+ do {
+ xas_lock(&xas);
+ curr = xas_store(&xas, entry);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return curr;
+}
+
+static noinline void check_xa_err(struct xarray *xa)
+{
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
+#ifndef __KERNEL__
+ /* The kernel does not fail GFP_NOWAIT allocations */
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+#endif
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
+// kills the test-suite :-(
+// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
+}
+
+static noinline void check_xas_retry(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, 1, GFP_KERNEL);
+
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
+ XA_BUG_ON(xa, xas_retry(&xas, NULL));
+ XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
+ xas_reset(&xas);
+ XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_node != NULL);
+
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
+ xas.xa_node = XAS_RESTART;
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ rcu_read_unlock();
+
+ /* Make sure we can iterate through retry entries */
+ xas_lock(&xas);
+ xas_set(&xas, 0);
+ xas_store(&xas, XA_RETRY_ENTRY);
+ xas_set(&xas, 1);
+ xas_store(&xas, XA_RETRY_ENTRY);
+
+ xas_set(&xas, 0);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_store(&xas, xa_mk_value(xas.xa_index));
+ }
+ xas_unlock(&xas);
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, 1);
+}
+
+static noinline void check_xa_load(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j < i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j >= i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ xa_erase_index(xa, i);
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
+
+ /* NULL elements have no marks set */
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Storing a pointer will not make a mark appear */
+ XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Setting one mark will not set another mark */
+ XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
+
+ /* Storing NULL clears marks, and they can't be set again */
+ xa_erase_index(xa, index);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /*
+ * Storing a multi-index entry over entries with marks gives the
+ * entire entry the union of the marks
+ */
+ BUG_ON((index % 4) != 0);
+ for (order = 2; order < max_order; order++) {
+ unsigned long base = round_down(index, 1UL << order);
+ unsigned long next = base + (1UL << order);
+ unsigned long i;
+
+ XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
+ xa_set_mark(xa, index + 1, XA_MARK_0);
+ XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
+ xa_set_mark(xa, index + 2, XA_MARK_1);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
+ xa_store_order(xa, index, order, xa_mk_value(index),
+ GFP_KERNEL);
+ for (i = base; i < next; i++) {
+ XA_STATE(xas, xa, i);
+ unsigned int seen = 0;
+ void *entry;
+
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+
+ /* We should see two elements in the array */
+ xas_for_each(&xas, entry, ULONG_MAX)
+ seen++;
+ XA_BUG_ON(xa, seen != 2);
+
+ /* One of which is marked */
+ xas_set(&xas, 0);
+ seen = 0;
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ seen++;
+ XA_BUG_ON(xa, seen != 1);
+ }
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
+ xa_erase_index(xa, index);
+ xa_erase_index(xa, next);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_2(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long index;
+ unsigned int count = 0;
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_set_mark(xa, 0, XA_MARK_0);
+ xas_lock(&xas);
+ xas_load(&xas);
+ xas_init_marks(&xas);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
+
+ for (index = 3500; index < 4500; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_0);
+ }
+
+ xas_reset(&xas);
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ count++;
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != 1000);
+
+ xas_lock(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_init_marks(&xas);
+ XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
+ XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
+ }
+ xas_unlock(&xas);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_mark(struct xarray *xa)
+{
+ unsigned long index;
+
+ for (index = 0; index < 16384; index += 4)
+ check_xa_mark_1(xa, index);
+
+ check_xa_mark_2(xa);
+}
+
+static noinline void check_xa_shrink(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 1);
+ struct xa_node *node;
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ /*
+ * Check that erasing the entry at 1 shrinks the tree and properly
+ * marks the node as being deleted.
+ */
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
+ node = xas.xa_node;
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
+ XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
+ XA_BUG_ON(xa, xas_load(&xas) != NULL);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 0; order < max_order; order++) {
+ unsigned long max = (1UL << order) - 1;
+ xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ rcu_read_lock();
+ node = xa_head(xa);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
+ NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_head(xa) == node);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, xa->xa_head != node);
+ xa_erase_index(xa, 0);
+ }
+}
+
+static noinline void check_cmpxchg(struct xarray *xa)
+{
+ void *FIVE = xa_mk_value(5);
+ void *SIX = xa_mk_value(6);
+ void *LOTS = xa_mk_value(12345678);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ xa_erase_index(xa, 12345678);
+ xa_erase_index(xa, 5);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_reserve(struct xarray *xa)
+{
+ void *entry;
+ unsigned long index = 0;
+
+ /* An array with a reserved entry is not empty */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_load(xa, 12345678));
+ xa_release(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Releasing a used entry does nothing */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* cmpxchg sees a reserved entry as NULL */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
+ GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Can iterate through a reserved entry */
+ xa_store_index(xa, 5, GFP_KERNEL);
+ xa_reserve(xa, 6, GFP_KERNEL);
+ xa_store_index(xa, 7, GFP_KERNEL);
+
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, index != 5 && index != 7);
+ }
+ xa_destroy(xa);
+}
+
+static noinline void check_xas_erase(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned long i, j;
+
+ for (i = 0; i < 200; i++) {
+ for (j = i; j < 2 * i + 17; j++) {
+ xas_set(&xas, j);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(j));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xas_set(&xas, ULONG_MAX);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+
+ xas_set(&xas, 0);
+ j = i;
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_value(j));
+ xas_store(&xas, NULL);
+ j++;
+ }
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ unsigned long min = index & ~((1UL << order) - 1);
+ unsigned long max = min + (1UL << order);
+
+ xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ xa_erase_index(xa, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != index);
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long i, j, k;
+ unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
+
+ /* Loading from any position returns the same value */
+ xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Storing adjacent to the value does not alter the value */
+ xa_store(xa, 3, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Overwriting multiple indexes works */
+ xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
+ rcu_read_unlock();
+
+ /* We can erase multiple values with a single store */
+ xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Even when the first slot is empty but the others aren't */
+ xa_store_index(xa, 1, GFP_KERNEL);
+ xa_store_index(xa, 2, GFP_KERNEL);
+ xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < max_order; i++) {
+ for (j = 0; j < max_order; j++) {
+ xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
+ xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+
+ for (k = 0; k < max_order; k++) {
+ void *entry = xa_load(xa, (1UL << k) - 1);
+ if ((i < k) && (j < k))
+ XA_BUG_ON(xa, entry != NULL);
+ else
+ XA_BUG_ON(xa, entry != xa_mk_value(j));
+ }
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ check_multi_store_1(xa, 200, i);
+ check_multi_store_1(xa, 0, i);
+ check_multi_store_1(xa, (1UL << i) + 1, i);
+ }
+ check_multi_store_2(xa, 4095, 9);
+#endif
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+
+static noinline void check_xa_alloc(void)
+{
+ int i;
+ u32 id;
+
+ /* An empty array should assign 0 to the first alloc */
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ /* Erasing it should make the array empty again */
+ xa_erase_index(&xa0, 0);
+ XA_BUG_ON(&xa0, !xa_empty(&xa0));
+
+ /* And it should assign 0 again */
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ /* The next assigned ID should be 1 */
+ xa_alloc_index(&xa0, 1, GFP_KERNEL);
+ xa_erase_index(&xa0, 1);
+
+ /* Storing a value should mark it used */
+ xa_store_index(&xa0, 1, GFP_KERNEL);
+ xa_alloc_index(&xa0, 2, GFP_KERNEL);
+
+ /* If we then erase 0, it should be free */
+ xa_erase_index(&xa0, 0);
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ xa_erase_index(&xa0, 1);
+ xa_erase_index(&xa0, 2);
+
+ for (i = 1; i < 5000; i++) {
+ xa_alloc_index(&xa0, i, GFP_KERNEL);
+ }
+
+ xa_destroy(&xa0);
+
+ id = 0xfffffffeU;
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(&xa0, id != 0xfffffffeU);
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(&xa0, id != 0xffffffffU);
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != -ENOSPC);
+ XA_BUG_ON(&xa0, id != 0xffffffffU);
+ xa_destroy(&xa0);
+}
+
+static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
+ unsigned int order, unsigned int present)
+{
+ XA_STATE_ORDER(xas, xa, start, order);
+ void *entry;
+ unsigned int count = 0;
+
+retry:
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, !xa_is_value(entry));
+ XA_BUG_ON(xa, entry < xa_mk_value(start));
+ XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+ count++;
+ }
+ xas_store(&xas, xa_mk_value(start));
+ xas_unlock(&xas);
+ if (xas_nomem(&xas, GFP_KERNEL)) {
+ count = 0;
+ goto retry;
+ }
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, count != present);
+ XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+ XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
+ xa_mk_value(start));
+ xa_erase_index(xa, start);
+}
+
+static noinline void check_store_iter(struct xarray *xa)
+{
+ unsigned int i, j;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned int min = 1 << i;
+ unsigned int max = (2 << i) - 1;
+ __check_store_iter(xa, 0, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ __check_store_iter(xa, min, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_store_index(xa, min, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_index(xa, max, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, j, GFP_KERNEL);
+ __check_store_iter(xa, 0, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, min + j, GFP_KERNEL);
+ __check_store_iter(xa, min, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_index(xa, 63, GFP_KERNEL);
+ xa_store_index(xa, 65, GFP_KERNEL);
+ __check_store_iter(xa, 64, 2, 1);
+ xa_erase_index(xa, 63);
+#endif
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_find(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long index;
+
+ xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+
+ index = 0;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(12));
+ XA_BUG_ON(xa, index != 12);
+ index = 13;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(12));
+ XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(16));
+ XA_BUG_ON(xa, index != 16);
+
+ xa_erase_index(xa, 12);
+ xa_erase_index(xa, 16);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static noinline void check_multi_find_2(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
+ unsigned int i, j;
+ void *entry;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned long index = 1UL << i;
+ for (j = 0; j < index; j++) {
+ XA_STATE(xas, xa, j + index);
+ xa_store_index(xa, index - 1, GFP_KERNEL);
+ xa_store_order(xa, index, i, xa_mk_value(index),
+ GFP_KERNEL);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xa_erase_index(xa, index);
+ }
+ rcu_read_unlock();
+ xa_erase_index(xa, index - 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+ unsigned long i, j, k;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /*
+ * Check xa_find with all pairs between 0 and 99 inclusive,
+ * starting at every index between 0 and 99
+ */
+ for (i = 0; i < 100; i++) {
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ for (j = 0; j < i; j++) {
+ XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
+ NULL);
+ xa_set_mark(xa, j, XA_MARK_0);
+ for (k = 0; k < 100; k++) {
+ unsigned long index = k;
+ void *entry = xa_find(xa, &index, ULONG_MAX,
+ XA_PRESENT);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+
+ index = k;
+ entry = xa_find(xa, &index, ULONG_MAX,
+ XA_MARK_0);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xa_erase_index(xa, j);
+ XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ }
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+ check_multi_find(xa);
+ check_multi_find_2(xa);
+}
+
+/* See find_swap_entry() in mm/shmem.c */
+static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned int checked = 0;
+ void *entry;
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ if (xas_retry(&xas, entry))
+ continue;
+ if (entry == item)
+ break;
+ checked++;
+ if ((checked % 4) != 0)
+ continue;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+
+ return entry ? xas.xa_index : -1;
+}
+
+static noinline void check_find_entry(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+ unsigned long offset, index;
+
+ for (order = 0; order < 20; order++) {
+ for (offset = 0; offset < (1UL << (order + 3));
+ offset += (1UL << order)) {
+ for (index = 0; index < (1UL << (order + 5));
+ index += (1UL << order)) {
+ xa_store_order(xa, index, order,
+ xa_mk_value(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, index) !=
+ xa_mk_value(index));
+ XA_BUG_ON(xa, xa_find_entry(xa,
+ xa_mk_value(index)) != index);
+ }
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_destroy(xa);
+ }
+ }
+#endif
+
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_small(struct xarray *xa, unsigned long idx)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long i;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, idx, GFP_KERNEL);
+
+ rcu_read_lock();
+ for (i = 0; i < idx * 4; i++) {
+ void *entry = xas_next(&xas);
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xas_next(&xas);
+ XA_BUG_ON(xa, xas.xa_index != i);
+
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ } while (i > 0);
+
+ xas_set(&xas, ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, idx);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move(struct xarray *xa)
+{
+ XA_STATE(xas, xa, (1 << 16) - 1);
+ unsigned long i;
+
+ for (i = 0; i < (1 << 16); i++)
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ for (i = (1 << 8); i < (1 << 15); i++)
+ xa_erase_index(xa, i);
+
+ i = xas.xa_index;
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ xa_destroy(xa);
+
+ for (i = 0; i < 16; i++)
+ check_move_small(xa, 1UL << i);
+
+ for (i = 2; i < 16; i++)
+ check_move_small(xa, (1UL << i) - 1);
+}
+
+static noinline void xa_store_many_order(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i = 0;
+
+ do {
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_find_conflict(&xas));
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1U << order); i++) {
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+}
+
+static noinline void check_create_range_1(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ unsigned long i;
+
+ xa_store_many_order(xa, index, order);
+ for (i = index; i < index + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_2(struct xarray *xa, unsigned order)
+{
+ unsigned long i;
+ unsigned long nr = 1UL << order;
+
+ for (i = 0; i < nr * nr; i += nr)
+ xa_store_many_order(xa, i, order);
+ for (i = 0; i < nr * nr; i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_3(void)
+{
+ XA_STATE(xas, NULL, 0);
+ xas_set_err(&xas, -EEXIST);
+ xas_create_range(&xas);
+ XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
+}
+
+static noinline void check_create_range_4(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned long base = xas.xa_index;
+ unsigned long i = 0;
+
+ xa_store_index(xa, index, GFP_KERNEL);
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1UL << order); i++) {
+ void *old = xas_store(&xas, xa_mk_value(base + i));
+ if (xas.xa_index == index)
+ XA_BUG_ON(xa, old != xa_mk_value(base + i));
+ else
+ XA_BUG_ON(xa, old != NULL);
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+
+ for (i = base; i < base + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range(struct xarray *xa)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
+
+ for (order = 0; order < max_order; order++) {
+ check_create_range_1(xa, 0, order);
+ check_create_range_1(xa, 1U << order, order);
+ check_create_range_1(xa, 2U << order, order);
+ check_create_range_1(xa, 3U << order, order);
+ check_create_range_1(xa, 1U << 24, order);
+ if (order < 10)
+ check_create_range_2(xa, order);
+
+ check_create_range_4(xa, 0, order);
+ check_create_range_4(xa, 1U << order, order);
+ check_create_range_4(xa, 2U << order, order);
+ check_create_range_4(xa, 3U << order, order);
+ check_create_range_4(xa, 1U << 24, order);
+
+ check_create_range_4(xa, 1, order);
+ check_create_range_4(xa, (1U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) - 1, order);
+ check_create_range_4(xa, (3U << order) + 1, order);
+ check_create_range_4(xa, (3U << order) - 1, order);
+ check_create_range_4(xa, (1U << 24) + 1, order);
+ }
+
+ check_create_range_3();
+}
+
+static noinline void __check_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
+
+ xa_store_range(xa, first, last, NULL, GFP_KERNEL);
+#endif
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_store_range(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 128; i++) {
+ for (j = i; j < 128; j++) {
+ __check_store_range(xa, i, j);
+ __check_store_range(xa, 128 + i, 128 + j);
+ __check_store_range(xa, 4095 + i, 4095 + j);
+ __check_store_range(xa, 4096 + i, 4096 + j);
+ __check_store_range(xa, 123456 + i, 123456 + j);
+ __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+ }
+ }
+}
+
+static LIST_HEAD(shadow_nodes);
+
+static void test_update_node(struct xa_node *node)
+{
+ if (node->count && node->count == node->nr_values) {
+ if (list_empty(&node->private_list))
+ list_add(&shadow_nodes, &node->private_list);
+ } else {
+ if (!list_empty(&node->private_list))
+ list_del_init(&node->private_list);
+ }
+}
+
+static noinline void shadow_remove(struct xarray *xa)
+{
+ struct xa_node *node;
+
+ xa_lock(xa);
+ while ((node = list_first_entry_or_null(&shadow_nodes,
+ struct xa_node, private_list))) {
+ XA_STATE(xas, node->array, 0);
+ XA_BUG_ON(xa, node->array != xa);
+ list_del_init(&node->private_list);
+ xas.xa_node = xa_parent_locked(node->array, node);
+ xas.xa_offset = node->offset;
+ xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
+ xas_set_update(&xas, test_update_node);
+ xas_store(&xas, NULL);
+ }
+ xa_unlock(xa);
+}
+
+static noinline void check_workingset(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ xas_set_update(&xas, test_update_node);
+
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_next(&xas);
+ xas_store(&xas, xa_mk_value(1));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ xas_lock(&xas);
+ xas_next(&xas);
+ xas_store(&xas, &xas);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+
+ xas_store(&xas, xa_mk_value(2));
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ shadow_remove(xa);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+/*
+ * Check that the pointer / value / sibling entries are accounted the
+ * way we expect them to be.
+ */
+static noinline void check_account(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+
+ for (order = 1; order < 12; order++) {
+ XA_STATE(xas, xa, 1 << order);
+
+ xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas.xa_node->count == 0);
+ XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+
+ xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+ GFP_KERNEL);
+ XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
+
+ xa_erase(xa, 1 << order);
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#endif
+}
+
+static noinline void check_destroy(struct xarray *xa)
+{
+ unsigned long index;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an empty array is a no-op */
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an array with a single entry */
+ for (index = 0; index < 1000; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
+ /* Destroying an array with a single entry at ULONG_MAX */
+ xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+#ifdef CONFIG_XARRAY_MULTI
+ /* Destroying an array with a multi-index entry */
+ xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static DEFINE_XARRAY(array);
+
+static int xarray_checks(void)
+{
+ check_xa_err(&array);
+ check_xas_retry(&array);
+ check_xa_load(&array);
+ check_xa_mark(&array);
+ check_xa_shrink(&array);
+ check_xas_erase(&array);
+ check_cmpxchg(&array);
+ check_reserve(&array);
+ check_multi_store(&array);
+ check_xa_alloc();
+ check_find(&array);
+ check_find_entry(&array);
+ check_account(&array);
+ check_destroy(&array);
+ check_move(&array);
+ check_create_range(&array);
+ check_store_range(&array);
+ check_store_iter(&array);
+
+ check_workingset(&array, 0);
+ check_workingset(&array, 64);
+ check_workingset(&array, 4096);
+
+ printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
+
+static void xarray_exit(void)
+{
+}
+
+module_init(xarray_checks);
+module_exit(xarray_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a48aaa79d352..37a54a6dd594 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -613,6 +613,109 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
}
static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+static siphash_key_t ptr_key __read_mostly;
+
+static void enable_ptr_key_workfn(struct work_struct *work)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /* Needs to run from preemptible context */
+ static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ /* This may be in an interrupt handler. */
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int key_size = sizeof(ptr_key);
+ int ret;
+
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ /* This is in preemptible context */
+ enable_ptr_key_workfn(&enable_ptr_key_work);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
+ unsigned long hashval;
+
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ spec.field_width = 2 * sizeof(ptr);
+ /* string length must be less than default_width */
+ return string(buf, end, str, spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
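+
+/*
+ * A minimal sketch of the effect (illustrative only; "some_ptr" is a
+ * hypothetical pointer): once the siphash key has been initialised,
+ *
+ *	printk("%p\n", some_ptr);
+ *
+ * emits the hashed 32-bit identifier rather than the raw address, while
+ * "%px" still prints the address unmodified (the 'x' case in pointer()
+ * below).  Booting with "debug_boot_weak_hash" makes %p fall back to
+ * hash_long() so pointers remain printable before the key is ready.
+ */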
+
+static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
@@ -1357,20 +1460,6 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
-static noinline_for_stack
-char *pointer_string(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
-{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
-
- return number(buf, end, (unsigned long int)ptr, spec);
-}
-
int kptr_restrict __read_mostly;
static noinline_for_stack
@@ -1421,7 +1510,8 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
}
static noinline_for_stack
-char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
+char *netdev_bits(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
@@ -1432,9 +1522,7 @@ char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
size = sizeof(netdev_features_t);
break;
default:
- num = (unsigned long)addr;
- size = sizeof(unsigned long);
- break;
+ return ptr_to_id(buf, end, addr, spec);
}
return special_hex_number(buf, end, num, size);
@@ -1474,7 +1562,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long));
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -1596,6 +1684,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
fmt = "f";
for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+ int precision;
if (pass) {
if (buf < end)
*buf = ':';
@@ -1607,7 +1696,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
buf = device_node_gen_full_name(dn, buf, end);
break;
case 'n': /* name */
- buf = string(buf, end, dn->name, str_spec);
+ p = kbasename(of_node_full_name(dn));
+ precision = str_spec.precision;
+ str_spec.precision = strchrnul(p, '@') - p;
+ buf = string(buf, end, p, str_spec);
+ str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
@@ -1651,69 +1744,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
-
-static void enable_ptr_key_workfn(struct work_struct *work)
-{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
-}
-
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static void fill_random_ptr_key(struct random_ready_callback *unused)
-{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
-}
-
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int ret = add_random_ready_callback(&random_ready);
-
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
-/* Maps a pointer to a 32 bit unique identifier. */
-static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
-{
- const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
- unsigned long hashval;
-
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
- spec.field_width = 2 * sizeof(ptr);
- /* string length must be less than default_width */
- return string(buf, end, str, spec);
- }
-
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
- return pointer_string(buf, end, (const void *)hashval, spec);
-}
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -1808,17 +1838,15 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
- * - 'O' For a kobject based struct. Must be one of the following:
- * - 'OF[fnpPcCF]' For a device tree object
- * Without any optional arguments prints the full_name
- * f device node full_name
- * n device node name
- * p device node phandle
- * P device node path spec (name + @unit)
- * F device node flags
- * c major compatible string
- * C full compatible string
- *
+ * - 'OF[fnpPcCF]' For a device tree object
+ * Without any optional arguments prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
* - 'x' For printing the address. Equivalent to "%lx".
*
* ** When making changes please also update:
@@ -1919,7 +1947,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
break;
return restricted_pointer(buf, end, ptr, spec);
case 'N':
- return netdev_bits(buf, end, ptr, fmt);
+ return netdev_bits(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, fmt);
case 'd':
@@ -1942,6 +1970,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ break;
case 'x':
return pointer_string(buf, end, ptr, spec);
}
@@ -2768,7 +2797,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
copy = end - str;
memcpy(str, args, copy);
str += len;
- args += len;
+ args += len + 1;
}
}
if (process)
diff --git a/lib/xarray.c b/lib/xarray.c
new file mode 100644
index 000000000000..8b176f009c08
--- /dev/null
+++ b/lib/xarray.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * XArray implementation
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+/*
+ * Coding conventions in this file:
+ *
+ * @xa is used to refer to the entire xarray.
+ * @xas is the 'xarray operation state'. It may be either a pointer to
+ * an xa_state, or an xa_state stored on the stack. This is an unfortunate
+ * ambiguity.
+ * @index is the index of the entry being operated on
+ * @mark is an xa_mark_t; a small number indicating one of the mark bits.
+ * @node refers to an xa_node; usually the primary one being operated on by
+ * this function.
+ * @offset is the index into the slots array inside an xa_node.
+ * @parent refers to the @xa_node closer to the head than @node.
+ * @entry refers to something stored in a slot in the xarray
+ */
+
+static inline unsigned int xa_lock_type(const struct xarray *xa)
+{
+ return (__force unsigned int)xa->xa_flags & 3;
+}
+
+static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_lock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_lock_bh(xas);
+ else
+ xas_lock(xas);
+}
+
+static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_unlock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_unlock_bh(xas);
+ else
+ xas_unlock(xas);
+}
+
+static inline bool xa_track_free(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_TRACK_FREE;
+}
+
+static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
+{
+ if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
+ xa->xa_flags |= XA_FLAGS_MARK(mark);
+}
+
+static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
+{
+ if (xa->xa_flags & XA_FLAGS_MARK(mark))
+ xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
+}
+
+static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
+{
+ return node->marks[(__force unsigned)mark];
+}
+
+static inline bool node_get_mark(struct xa_node *node,
+ unsigned int offset, xa_mark_t mark)
+{
+ return test_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was set */
+static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_set_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was set */
+static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_clear_bit(offset, node_marks(node, mark));
+}
+
+static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
+{
+ return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
+{
+ bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+#define mark_inc(mark) do { \
+ mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
+} while (0)
+
+/*
+ * xas_squash_marks() - Merge all marks to the first entry
+ * @xas: Array operation state.
+ *
+ * Set a mark on the first entry if any entry has it set. Clear marks on
+ * all sibling entries.
+ */
+static void xas_squash_marks(const struct xa_state *xas)
+{
+ unsigned int mark = 0;
+ unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
+
+ if (!xas->xa_sibs)
+ return;
+
+ do {
+ unsigned long *marks = xas->xa_node->marks[mark];
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
+ continue;
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ } while (mark++ != (__force unsigned)XA_MARK_MAX);
+}
+
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+ return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void xas_set_offset(struct xa_state *xas)
+{
+ xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
+}
+
+/* move the index either forwards (find) or backwards (sibling slot) */
+static void xas_move_index(struct xa_state *xas, unsigned long offset)
+{
+ unsigned int shift = xas->xa_node->shift;
+ xas->xa_index &= ~XA_CHUNK_MASK << shift;
+ xas->xa_index += offset << shift;
+}
+
+static void xas_advance(struct xa_state *xas)
+{
+ xas->xa_offset++;
+ xas_move_index(xas, xas->xa_offset);
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+
+/*
+ * Starts a walk. If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to. If we're in an
+ * error state, return NULL. If the index is outside the current scope
+ * of the xarray, set @xas->xa_node to XAS_BOUNDS and return NULL. Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+ void *entry;
+
+ if (xas_valid(xas))
+ return xas_reload(xas);
+ if (xas_error(xas))
+ return NULL;
+
+ entry = xa_head(xas->xa);
+ if (!xa_is_node(entry)) {
+ if (xas->xa_index)
+ return set_bounds(xas);
+ } else {
+ if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+ return set_bounds(xas);
+ }
+
+ xas->xa_node = NULL;
+ return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+ unsigned int offset = get_offset(xas->xa_index, node);
+ void *entry = xa_entry(xas->xa, node, offset);
+
+ xas->xa_node = node;
+ if (xa_is_sibling(entry)) {
+ offset = xa_to_sibling(entry);
+ entry = xa_entry(xas->xa, node, offset);
+ }
+
+ xas->xa_offset = offset;
+ return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry
+ * stored at xa_index. However, it will do nothing and return %NULL if
+ * @xas is in an error state. xas_load() will never expand the tree.
+ *
+ * If the xa_state is set up to operate on a multi-index entry, xas_load()
+ * may return %NULL or an internal entry, even if there are entries
+ * present within the range specified by @xas.
+ *
+ * Context: Any context. The caller should hold the xa_lock or the RCU lock.
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+ void *entry = xas_start(xas);
+
+ while (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+
+ if (xas->xa_shift > node->shift)
+ break;
+ entry = xas_descend(xas, node);
+ }
+ return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
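+
+/*
+ * A minimal usage sketch (illustrative; "my_xa" is a hypothetical,
+ * already-initialised XArray): load the entry at index 42 under the
+ * RCU read lock.
+ *
+ *	XA_STATE(xas, &my_xa, 42);
+ *	void *entry;
+ *
+ *	rcu_read_lock();
+ *	entry = xas_load(&xas);
+ *	rcu_read_unlock();
+ */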
+
+/* Move the radix tree node cache here */
+extern struct kmem_cache *radix_tree_node_cachep;
+extern void radix_tree_node_rcu_free(struct rcu_head *head);
+
+#define XA_RCU_FREE ((struct xarray *)1)
+
+static void xa_node_free(struct xa_node *node)
+{
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->array = XA_RCU_FREE;
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+}
+
+/*
+ * xas_destroy() - Free any resources allocated during the XArray operation.
+ * @xas: XArray operation state.
+ *
+ * This function is now internal-only.
+ */
+static void xas_destroy(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_alloc;
+
+ if (!node)
+ return;
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ kmem_cache_free(radix_tree_node_cachep, node);
+ xas->xa_alloc = NULL;
+}
+
+/**
+ * xas_nomem() - Allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * If we need to add new nodes to the XArray, we try to allocate memory
+ * with GFP_NOWAIT while holding the lock, which will usually succeed.
+ * If it fails, @xas is flagged as needing memory to continue. The caller
+ * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
+ * the caller should retry the operation.
+ *
+ * Forward progress is guaranteed as one node is allocated here and
+ * stored in the xa_state where it will be found by xas_alloc(). More
+ * nodes will likely be found in the slab allocator, but we do not tie
+ * them up here.
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+bool xas_nomem(struct xa_state *xas, gfp_t gfp)
+{
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ if (!xas->xa_alloc)
+ return false;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+EXPORT_SYMBOL_GPL(xas_nomem);
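+
+/*
+ * The caller pattern described above, as exercised throughout
+ * lib/test_xarray.c (a sketch; "my_xa", "index" and "item" are
+ * hypothetical):
+ *
+ *	XA_STATE(xas, &my_xa, index);
+ *
+ *	do {
+ *		xas_lock(&xas);
+ *		xas_store(&xas, item);
+ *		xas_unlock(&xas);
+ *	} while (xas_nomem(&xas, GFP_KERNEL));
+ */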
+
+/*
+ * __xas_nomem() - Drop locks and allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * Internal variant of xas_nomem().
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
+ __must_hold(xas->xa->xa_lock)
+{
+ unsigned int lock_type = xa_lock_type(xas->xa);
+
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ if (gfpflags_allow_blocking(gfp)) {
+ xas_unlock_type(xas, lock_type);
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas_lock_type(xas, lock_type);
+ } else {
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ }
+ if (!xas->xa_alloc)
+ return false;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+
+static void xas_update(struct xa_state *xas, struct xa_node *node)
+{
+ if (xas->xa_update)
+ xas->xa_update(node);
+ else
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+}
+
+static void *xas_alloc(struct xa_state *xas, unsigned int shift)
+{
+ struct xa_node *parent = xas->xa_node;
+ struct xa_node *node = xas->xa_alloc;
+
+ if (xas_invalid(xas))
+ return NULL;
+
+ if (node) {
+ xas->xa_alloc = NULL;
+ } else {
+ node = kmem_cache_alloc(radix_tree_node_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!node) {
+ xas_set_err(xas, -ENOMEM);
+ return NULL;
+ }
+ }
+
+ if (parent) {
+ node->offset = xas->xa_offset;
+ parent->count++;
+ XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
+ xas_update(xas, parent);
+ }
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->shift = shift;
+ node->count = 0;
+ node->nr_values = 0;
+ RCU_INIT_POINTER(node->parent, xas->xa_node);
+ node->array = xas->xa;
+
+ return node;
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+/* Returns the number of indices covered by a given xa_state */
+static unsigned long xas_size(const struct xa_state *xas)
+{
+ return (xas->xa_sibs + 1UL) << xas->xa_shift;
+}
+#endif
+
+/*
+ * Use this to calculate the maximum index that will need to be created
+ * in order to add the entry described by @xas. Because we cannot store a
+ * multiple-index entry at index 0, the calculation is a little more complex
+ * than you might expect.
+ */
+static unsigned long xas_max(struct xa_state *xas)
+{
+ unsigned long max = xas->xa_index;
+
+#ifdef CONFIG_XARRAY_MULTI
+ if (xas->xa_shift || xas->xa_sibs) {
+ unsigned long mask = xas_size(xas) - 1;
+ max |= mask;
+ if (mask == max)
+ max++;
+ }
+#endif
+
+ return max;
+}
+
+/* The maximum index that can be contained in the array without expanding it */
+static unsigned long max_index(void *entry)
+{
+ if (!xa_is_node(entry))
+ return 0;
+ return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
+}
+
+static void xas_shrink(struct xa_state *xas)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ void *entry;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count != 1)
+ break;
+ entry = xa_entry_locked(xa, node, 0);
+ if (!entry)
+ break;
+ if (!xa_is_node(entry) && node->shift)
+ break;
+ xas->xa_node = XAS_BOUNDS;
+
+ RCU_INIT_POINTER(xa->xa_head, entry);
+ if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
+ xa_mark_clear(xa, XA_FREE_MARK);
+
+ node->count = 0;
+ node->nr_values = 0;
+ if (!xa_is_node(entry))
+ RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (!xa_is_node(entry))
+ break;
+ node = xa_to_node(entry);
+ node->parent = NULL;
+ }
+}
+
+/*
+ * xas_delete_node() - Attempt to delete an xa_node
+ * @xas: Array operation state.
+ *
+ * Attempts to delete the @xas->xa_node. This will fail if the node has
+ * a non-zero reference count.
+ */
+static void xas_delete_node(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ struct xa_node *parent;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count)
+ break;
+
+ parent = xa_parent_locked(xas->xa, node);
+ xas->xa_node = parent;
+ xas->xa_offset = node->offset;
+ xa_node_free(node);
+
+ if (!parent) {
+ xas->xa->xa_head = NULL;
+ xas->xa_node = XAS_BOUNDS;
+ return;
+ }
+
+ parent->slots[xas->xa_offset] = NULL;
+ parent->count--;
+ XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
+ node = parent;
+ xas_update(xas, node);
+ }
+
+ if (!node->parent)
+ xas_shrink(xas);
+}
+
+/**
+ * xas_free_nodes() - Free this node and all nodes that it references
+ * @xas: Array operation state.
+ * @top: Node to free
+ *
+ * This node has been removed from the tree. We must now free it and all
+ * of its subnodes. There may be RCU walkers with references into the tree,
+ * so we must replace all entries with retry markers.
+ */
+static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
+{
+ unsigned int offset = 0;
+ struct xa_node *node = top;
+
+ for (;;) {
+ void *entry = xa_entry_locked(xas->xa, node, offset);
+
+ if (xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ offset = 0;
+ continue;
+ }
+ if (entry)
+ RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
+ offset++;
+ while (offset == XA_CHUNK_SIZE) {
+ struct xa_node *parent;
+
+ parent = xa_parent_locked(xas->xa, node);
+ offset = node->offset + 1;
+ node->count = 0;
+ node->nr_values = 0;
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (node == top)
+ return;
+ node = parent;
+ }
+ }
+}
+
+/*
+ * xas_expand adds nodes to the head of the tree until it has reached
+ * sufficient height to be able to contain @xas->xa_index
+ */
+static int xas_expand(struct xa_state *xas, void *head)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = NULL;
+ unsigned int shift = 0;
+ unsigned long max = xas_max(xas);
+
+ if (!head) {
+ if (max == 0)
+ return 0;
+ while ((max >> shift) >= XA_CHUNK_SIZE)
+ shift += XA_CHUNK_SHIFT;
+ return shift + XA_CHUNK_SHIFT;
+ } else if (xa_is_node(head)) {
+ node = xa_to_node(head);
+ shift = node->shift + XA_CHUNK_SHIFT;
+ }
+ xas->xa_node = NULL;
+
+ while (max > max_index(head)) {
+ xa_mark_t mark = 0;
+
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ node = xas_alloc(xas, shift);
+ if (!node)
+ return -ENOMEM;
+
+ node->count = 1;
+ if (xa_is_value(head))
+ node->nr_values = 1;
+ RCU_INIT_POINTER(node->slots[0], head);
+
+ /* Propagate the aggregated mark info to the new child */
+ for (;;) {
+ if (xa_track_free(xa) && mark == XA_FREE_MARK) {
+ node_mark_all(node, XA_FREE_MARK);
+ if (!xa_marked(xa, XA_FREE_MARK)) {
+ node_clear_mark(node, 0, XA_FREE_MARK);
+ xa_mark_set(xa, XA_FREE_MARK);
+ }
+ } else if (xa_marked(xa, mark)) {
+ node_set_mark(node, 0, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ /*
+ * Now that the new node is fully initialised, we can add
+ * it to the tree
+ */
+ if (xa_is_node(head)) {
+ xa_to_node(head)->offset = 0;
+ rcu_assign_pointer(xa_to_node(head)->parent, node);
+ }
+ head = xa_mk_node(node);
+ rcu_assign_pointer(xa->xa_head, head);
+ xas_update(xas, node);
+
+ shift += XA_CHUNK_SHIFT;
+ }
+
+ xas->xa_node = node;
+ return shift;
+}
+
+/*
+ * xas_create() - Create a slot to store an entry in.
+ * @xas: XArray operation state.
+ *
+ * Most users will not need to call this function directly, as it is called
+ * by xas_store(). It is useful for doing conditional store operations
+ * (see the xa_cmpxchg() implementation for an example).
+ *
+ * Return: If the slot already existed, returns the contents of this slot.
+ * If the slot was newly created, returns NULL. If it failed to create the
+ * slot, returns NULL and indicates the error in @xas.
+ */
+static void *xas_create(struct xa_state *xas)
+{
+ struct xarray *xa = xas->xa;
+ void *entry;
+ void __rcu **slot;
+ struct xa_node *node = xas->xa_node;
+ int shift;
+ unsigned int order = xas->xa_shift;
+
+ if (xas_top(node)) {
+ entry = xa_head_locked(xa);
+ xas->xa_node = NULL;
+ shift = xas_expand(xas, entry);
+ if (shift < 0)
+ return NULL;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ } else if (xas_error(xas)) {
+ return NULL;
+ } else if (node) {
+ unsigned int offset = xas->xa_offset;
+
+ shift = node->shift;
+ entry = xa_entry_locked(xa, node, offset);
+ slot = &node->slots[offset];
+ } else {
+ shift = 0;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ }
+
+ while (shift > order) {
+ shift -= XA_CHUNK_SHIFT;
+ if (!entry) {
+ node = xas_alloc(xas, shift);
+ if (!node)
+ break;
+ if (xa_track_free(xa))
+ node_mark_all(node, XA_FREE_MARK);
+ rcu_assign_pointer(*slot, xa_mk_node(node));
+ } else if (xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ } else {
+ break;
+ }
+ entry = xas_descend(xas, node);
+ slot = &node->slots[xas->xa_offset];
+ }
+
+ return entry;
+}
+
+/**
+ * xas_create_range() - Ensure that stores to this range will succeed
+ * @xas: XArray operation state.
+ *
+ * Creates all of the slots in the range covered by @xas. Sets @xas to
+ * create single-index entries and positions it at the beginning of the
+ * range. This is for the benefit of users which have not yet been
+ * converted to use multi-index entries.
+ */
+void xas_create_range(struct xa_state *xas)
+{
+ unsigned long index = xas->xa_index;
+ unsigned char shift = xas->xa_shift;
+ unsigned char sibs = xas->xa_sibs;
+
+ xas->xa_index |= ((sibs + 1) << shift) - 1;
+ if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
+ xas->xa_offset |= sibs;
+ xas->xa_shift = 0;
+ xas->xa_sibs = 0;
+
+ for (;;) {
+ xas_create(xas);
+ if (xas_error(xas))
+ goto restore;
+ if (xas->xa_index <= (index | XA_CHUNK_MASK))
+ goto success;
+ xas->xa_index -= XA_CHUNK_SIZE;
+
+ for (;;) {
+ struct xa_node *node = xas->xa_node;
+ xas->xa_node = xa_parent_locked(xas->xa, node);
+ xas->xa_offset = node->offset - 1;
+ if (node->offset != 0)
+ break;
+ }
+ }
+
+restore:
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+ xas->xa_index = index;
+ return;
+success:
+ xas->xa_index = index;
+ if (xas->xa_node)
+ xas_set_offset(xas);
+}
+EXPORT_SYMBOL_GPL(xas_create_range);
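+
+/*
+ * Sketch of the intended use, mirroring xa_store_many_order() in
+ * lib/test_xarray.c: pre-create a naturally aligned range, then fill it
+ * with single-index entries ("my_xa", "index" and "order" are
+ * hypothetical).
+ *
+ *	XA_STATE_ORDER(xas, &my_xa, index, order);
+ *	unsigned int i;
+ *
+ *	do {
+ *		xas_lock(&xas);
+ *		xas_create_range(&xas);
+ *		if (!xas_error(&xas)) {
+ *			for (i = 0; i < (1U << order); i++) {
+ *				xas_store(&xas, xa_mk_value(index + i));
+ *				xas_next(&xas);
+ *			}
+ *		}
+ *		xas_unlock(&xas);
+ *	} while (xas_nomem(&xas, GFP_KERNEL));
+ */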
+
+static void update_node(struct xa_state *xas, struct xa_node *node,
+ int count, int values)
+{
+ if (!node || (!count && !values))
+ return;
+
+ node->count += count;
+ node->nr_values += values;
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
+ xas_update(xas, node);
+ if (count < 0)
+ xas_delete_node(xas);
+}
+
+/**
+ * xas_store() - Store this entry in the XArray.
+ * @xas: XArray operation state.
+ * @entry: New entry.
+ *
+ * If @xas is operating on a multi-index entry, the entry returned by this
+ * function is essentially meaningless (it may be an internal entry or it
+ * may be %NULL, even if there are non-NULL entries at some of the indices
+ * covered by the range). This is not a problem for any current users,
+ * and can be changed if needed.
+ *
+ * Return: The old entry at this index.
+ */
+void *xas_store(struct xa_state *xas, void *entry)
+{
+ struct xa_node *node;
+ void __rcu **slot = &xas->xa->xa_head;
+ unsigned int offset, max;
+ int count = 0;
+ int values = 0;
+ void *first, *next;
+ bool value = xa_is_value(entry);
+
+ if (entry)
+ first = xas_create(xas);
+ else
+ first = xas_load(xas);
+
+ if (xas_invalid(xas))
+ return first;
+ node = xas->xa_node;
+ if (node && (xas->xa_shift < node->shift))
+ xas->xa_sibs = 0;
+ if ((first == entry) && !xas->xa_sibs)
+ return first;
+
+ next = first;
+ offset = xas->xa_offset;
+ max = xas->xa_offset + xas->xa_sibs;
+ if (node) {
+ slot = &node->slots[offset];
+ if (xas->xa_sibs)
+ xas_squash_marks(xas);
+ }
+ if (!entry)
+ xas_init_marks(xas);
+
+ for (;;) {
+ /*
+ * Must clear the marks before setting the entry to NULL,
+ * otherwise xas_for_each_marked may find a NULL entry and
+ * stop early. rcu_assign_pointer contains a release barrier
+ * so the mark clearing will appear to happen before the
+ * entry is set to NULL.
+ */
+ rcu_assign_pointer(*slot, entry);
+ if (xa_is_node(next))
+ xas_free_nodes(xas, xa_to_node(next));
+ if (!node)
+ break;
+ count += !next - !entry;
+ values += !xa_is_value(first) - !value;
+ if (entry) {
+ if (offset == max)
+ break;
+ if (!xa_is_sibling(entry))
+ entry = xa_mk_sibling(xas->xa_offset);
+ } else {
+ if (offset == XA_CHUNK_MASK)
+ break;
+ }
+ next = xa_entry_locked(xas->xa, node, ++offset);
+ if (!xa_is_sibling(next)) {
+ if (!entry && (offset > max))
+ break;
+ first = next;
+ }
+ slot++;
+ }
+
+ update_node(xas, node, count, values);
+ return first;
+}
+EXPORT_SYMBOL_GPL(xas_store);
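+
+/*
+ * Note that storing NULL never allocates memory (it takes the xas_load()
+ * path above), so erasing an entry needs only the lock and no xas_nomem()
+ * retry loop.  Sketch, following check_xas_erase() in lib/test_xarray.c:
+ *
+ *	xas_lock(&xas);
+ *	xas_store(&xas, NULL);
+ *	xas_unlock(&xas);
+ */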
+
+/**
+ * xas_get_mark() - Returns the state of this mark.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Return: true if the mark is set, false if the mark is clear or @xas
+ * is in an error state.
+ */
+bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ if (xas_invalid(xas))
+ return false;
+ if (!xas->xa_node)
+ return xa_marked(xas->xa, mark);
+ return node_get_mark(xas->xa_node, xas->xa_offset, mark);
+}
+EXPORT_SYMBOL_GPL(xas_get_mark);
+
+/**
+ * xas_set_mark() - Sets the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Sets the specified mark on this entry, and walks up the tree setting it
+ * on all the ancestor entries. Does nothing if @xas has not been walked to
+ * an entry, or is in an error state.
+ */
+void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (node_set_mark(node, offset, mark))
+ return;
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (!xa_marked(xas->xa, mark))
+ xa_mark_set(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_set_mark);
+
+/**
+ * xas_clear_mark() - Clears the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Clears the specified mark on this entry, and walks back to the head
+ * attempting to clear it on all the ancestor entries. Does nothing if
+ * @xas has not been walked to an entry, or is in an error state.
+ */
+void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (!node_clear_mark(node, offset, mark))
+ return;
+ if (node_any_mark(node, mark))
+ return;
+
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (xa_marked(xas->xa, mark))
+ xa_mark_clear(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_clear_mark);
+
+/**
+ * xas_init_marks() - Initialise all marks for the entry
+ * @xas: Array operations state.
+ *
+ * Initialise all marks for the entry specified by @xas. If we're tracking
+ * free entries with a mark, we need to set it on all entries. All other
+ * marks are cleared.
+ *
+ * This implementation is not as efficient as it could be; we may walk
+ * up the tree multiple times.
+ */
+void xas_init_marks(const struct xa_state *xas)
+{
+ xa_mark_t mark = 0;
+
+ for (;;) {
+ if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
+ xas_set_mark(xas, mark);
+ else
+ xas_clear_mark(xas, mark);
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+EXPORT_SYMBOL_GPL(xas_init_marks);
+
+/**
+ * xas_pause() - Pause a walk to drop a lock.
+ * @xas: XArray operation state.
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @xas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call xas_pause(), the xa_for_each()
+ * iterator may be more appropriate.
+ *
+ * Note that xas_pause() only works for forward iteration. If a user needs
+ * to pause a reverse iteration, we will need a xas_pause_rev().
+ */
+void xas_pause(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (xas_invalid(xas))
+ return;
+
+ if (node) {
+ unsigned int offset = xas->xa_offset;
+ while (++offset < XA_CHUNK_SIZE) {
+ if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+ break;
+ }
+ xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ } else {
+ xas->xa_index++;
+ }
+ xas->xa_node = XAS_RESTART;
+}
+EXPORT_SYMBOL_GPL(xas_pause);
+
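A minimal sketch of the pattern described above, using the xas_for_each() and
xas_retry() helpers that appear later in this file; my_process() is a
hypothetical stand-in for whatever per-entry work the caller needs to do:

    static void walk_and_yield(struct xarray *xa)
    {
            XA_STATE(xas, xa, 0);
            void *entry;

            rcu_read_lock();
            xas_for_each(&xas, entry, ULONG_MAX) {
                    if (xas_retry(&xas, entry))
                            continue;
                    my_process(entry);              /* hypothetical per-entry work */
                    if (need_resched()) {
                            xas_pause(&xas);        /* make @xas safe to restart */
                            rcu_read_unlock();
                            cond_resched();
                            rcu_read_lock();
                    }
            }
            rcu_read_unlock();
    }
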
+/*
+ * __xas_prev() - Find the previous entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_prev() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_prev(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index--;
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset--;
+
+ while (xas->xa_offset == 255) {
+ xas->xa_offset = xas->xa_node->offset - 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_prev);
+
+/*
+ * __xas_next() - Find the next entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_next() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_next(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index++;
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset++;
+
+ while (xas->xa_offset == XA_CHUNK_SIZE) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_next);
+
+/**
+ * xas_find() - Find the next present entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * If the @xas has not yet been walked to an entry, return the first entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we move to the
+ * next entry.
+ *
+ * If no entry is found and the array is smaller than @max, the iterator
+ * is set to the smallest index not yet in the array. This allows @xas
+ * to be immediately passed to xas_store().
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find(struct xa_state *xas, unsigned long max)
+{
+ void *entry;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ return set_bounds(xas);
+ } else if (xas_top(xas->xa_node)) {
+ entry = xas_load(xas);
+ if (entry || xas_not_node(xas->xa_node))
+ return entry;
+ } else if (!xas->xa_node->shift &&
+ xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
+ xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
+ }
+
+ xas_advance(xas);
+
+ while (xas->xa_node && (xas->xa_index <= max)) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_node(entry)) {
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = 0;
+ continue;
+ }
+ if (entry && !xa_is_sibling(entry))
+ return entry;
+
+ xas_advance(xas);
+ }
+
+ if (!xas->xa_node)
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find);
+
+/**
+ * xas_find_marked() - Find the next marked entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark number to search for.
+ *
+ * If the @xas has not yet been walked to an entry, return the first marked
+ * entry which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we return the
+ * first marked entry with an index > xas.xa_index.
+ *
+ * If no marked entry is found and the array is smaller than @max, @xas is
+ * set to the bounds state and xas->xa_index is set to the smallest index
+ * not yet in the array. This allows @xas to be immediately passed to
+ * xas_store().
+ *
+ * If no entry is found before @max is reached, @xas is set to the restart
+ * state.
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
+{
+ bool advance = true;
+ unsigned int offset;
+ void *entry;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ goto out;
+ } else if (xas_top(xas->xa_node)) {
+ advance = false;
+ entry = xa_head(xas->xa);
+ xas->xa_node = NULL;
+ if (xas->xa_index > max_index(entry))
+ goto bounds;
+ if (!xa_is_node(entry)) {
+ if (xa_marked(xas->xa, mark))
+ return entry;
+ xas->xa_index = 1;
+ goto out;
+ }
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
+ }
+
+ while (xas->xa_index <= max) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ advance = false;
+ continue;
+ }
+
+ if (!advance) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_sibling(entry)) {
+ xas->xa_offset = xa_to_sibling(entry);
+ xas_move_index(xas, xas->xa_offset);
+ }
+ }
+
+ offset = xas_find_chunk(xas, advance, mark);
+ if (offset > xas->xa_offset) {
+ advance = false;
+ xas_move_index(xas, offset);
+ /* Mind the wrap */
+ if ((xas->xa_index - 1) >= max)
+ goto max;
+ xas->xa_offset = offset;
+ if (offset == XA_CHUNK_SIZE)
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+
+out:
+ if (!max)
+ goto max;
+bounds:
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+max:
+ xas->xa_node = XAS_RESTART;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_marked);
+
+/**
+ * xas_find_conflict() - Find the next present entry in a range.
+ * @xas: XArray operation state.
+ *
+ * The @xas describes both a range and a position within that range.
+ *
+ * Context: Any context. Expects xa_lock to be held.
+ * Return: The next entry in the range covered by @xas or %NULL.
+ */
+void *xas_find_conflict(struct xa_state *xas)
+{
+ void *curr;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node)
+ return NULL;
+
+ if (xas_top(xas->xa_node)) {
+ curr = xas_start(xas);
+ if (!curr)
+ return NULL;
+ while (xa_is_node(curr)) {
+ struct xa_node *node = xa_to_node(curr);
+ curr = xas_descend(xas, node);
+ }
+ if (curr)
+ return curr;
+ }
+
+ if (xas->xa_node->shift > xas->xa_shift)
+ return NULL;
+
+ for (;;) {
+ if (xas->xa_node->shift == xas->xa_shift) {
+ if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
+ break;
+ } else if (xas->xa_offset == XA_CHUNK_MASK) {
+ xas->xa_offset = xas->xa_node->offset;
+ xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ continue;
+ }
+ curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
+ if (xa_is_sibling(curr))
+ continue;
+ while (xa_is_node(curr)) {
+ xas->xa_node = xa_to_node(curr);
+ xas->xa_offset = 0;
+ curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
+ }
+ if (curr)
+ return curr;
+ }
+ xas->xa_offset -= xas->xa_sibs;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_conflict);
+
+/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+ unsigned int lock_type;
+ static struct lock_class_key xa_lock_irq;
+ static struct lock_class_key xa_lock_bh;
+
+ spin_lock_init(&xa->xa_lock);
+ xa->xa_flags = flags;
+ xa->xa_head = NULL;
+
+ lock_type = xa_lock_type(xa);
+ if (lock_type == XA_LOCK_IRQ)
+ lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
+ else if (lock_type == XA_LOCK_BH)
+ lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
+}
+EXPORT_SYMBOL(xa_init_flags);
+
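For instance, a user whose xa_lock is also taken from interrupt handlers might
initialise the array as below; this is only a sketch, and XA_FLAGS_LOCK_IRQ
and the init-function name are assumptions based on the accompanying header
changes rather than anything defined in this file:

    static struct xarray my_irq_safe_array;

    static int __init my_subsystem_init(void)
    {
            /* Tell lockdep that xa_lock is taken with interrupts disabled. */
            xa_init_flags(&my_irq_safe_array, XA_FLAGS_LOCK_IRQ);
            return 0;
    }
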
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_load(&xas);
+ if (xa_is_zero(entry))
+ entry = NULL;
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
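A lookup is then a single call, with no locking required of the caller. A
sketch, where struct my_object is a hypothetical caller-defined type:

    static struct my_object *my_lookup(struct xarray *xa, unsigned long id)
    {
            /* xa_load() takes and releases the RCU lock internally. */
            return xa_load(xa, id);         /* NULL means nothing is stored */
    }
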
+static void *xas_result(struct xa_state *xas, void *curr)
+{
+ if (xa_is_zero(curr))
+ return NULL;
+ XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
+ if (xas_error(xas))
+ curr = xas->xa_node;
+ return curr;
+}
+
+/**
+ * __xa_erase() - Erase this entry from the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ *
+ * If the entry at this index is a multi-index entry then all indices will
+ * be erased, and the entry will no longer be a multi-index entry.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The old entry at this index.
+ */
+void *__xa_erase(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ return xas_result(&xas, xas_store(&xas, NULL));
+}
+EXPORT_SYMBOL_GPL(__xa_erase);
+
+/**
+ * xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
+ */
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ curr = xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(xa_store);
+
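A typical insertion checks the return value with xa_err(). This sketch, using
a hypothetical struct my_object, silently replaces any existing entry at @id:

    static int my_insert(struct xarray *xa, unsigned long id,
                         struct my_object *obj)
    {
            void *old = xa_store(xa, id, obj, GFP_KERNEL);

            /* 0 on success; -ENOMEM or -EINVAL if the store failed. */
            return xa_err(old);
    }
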
+/**
+ * __xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ curr = xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_store);
+
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ curr = xas_load(&xas);
+ if (curr == XA_ZERO_ENTRY)
+ curr = NULL;
+ if (curr == old) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(xa_cmpxchg);
+
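The common "insert only if empty" idiom follows directly from the description
above; a sketch with a hypothetical struct my_object:

    static int my_insert_once(struct xarray *xa, unsigned long id,
                              struct my_object *obj)
    {
            void *curr = xa_cmpxchg(xa, id, NULL, obj, GFP_KERNEL);
            int err = xa_err(curr);

            if (err)
                    return err;             /* e.g. -ENOMEM */
            return curr ? -EBUSY : 0;       /* non-NULL: someone beat us to it */
    }
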
+/**
+ * __xa_cmpxchg() - Conditionally replace an entry in the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ curr = xas_load(&xas);
+ if (curr == XA_ZERO_ENTRY)
+ curr = NULL;
+ if (curr == old) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loads from @index will continue to see a %NULL pointer until a
+ * subsequent store to @index.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
+ * if specified in XArray flags. May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int lock_type = xa_lock_type(xa);
+ void *curr;
+
+ do {
+ xas_lock_type(&xas, lock_type);
+ curr = xas_load(&xas);
+ if (!curr)
+ xas_store(&xas, XA_ZERO_ENTRY);
+ xas_unlock_type(&xas, lock_type);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(xa_reserve);
+
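A sketch of the reserve-then-store pattern the documentation describes;
my_finish_setup() is a placeholder for a step that may fail after the index
has already been chosen:

    static int my_setup(struct xarray *xa, unsigned long id,
                        struct my_object *obj)
    {
            int err = xa_reserve(xa, id, GFP_KERNEL);

            if (err)
                    return err;
            if (my_finish_setup(obj) < 0) {
                    xa_release(xa, id);     /* give the reservation back */
                    return -EIO;
            }
            xa_store(xa, id, obj, GFP_KERNEL);      /* fills the reserved slot */
            return 0;
    }
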
+#ifdef CONFIG_XARRAY_MULTI
+static void xas_set_range(struct xa_state *xas, unsigned long first,
+ unsigned long last)
+{
+ unsigned int shift = 0;
+ unsigned long sibs = last - first;
+ unsigned int offset = XA_CHUNK_MASK;
+
+ xas_set(xas, first);
+
+ while ((first & XA_CHUNK_MASK) == 0) {
+ if (sibs < XA_CHUNK_MASK)
+ break;
+ if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
+ break;
+ shift += XA_CHUNK_SHIFT;
+ if (offset == XA_CHUNK_MASK)
+ offset = sibs & XA_CHUNK_MASK;
+ sibs >>= XA_CHUNK_SHIFT;
+ first >>= XA_CHUNK_SHIFT;
+ }
+
+ offset = first & XA_CHUNK_MASK;
+ if (offset + sibs > XA_CHUNK_MASK)
+ sibs = XA_CHUNK_MASK - offset;
+ if ((((first + sibs + 1) << shift) - 1) > last)
+ sibs -= 1;
+
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+}
+
+/**
+ * xa_store_range() - Store this entry at a range of indices in the XArray.
+ * @xa: XArray.
+ * @first: First index to affect.
+ * @last: Last index to affect.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from any index between @first and @last,
+ * inclusive, will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with the stored range are unaffected unless @entry
+ * is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
+ * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
+ */
+void *xa_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+ if (last < first)
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ if (entry) {
+ unsigned int order = (last == ~0UL) ? 64 :
+ ilog2(last + 1);
+ xas_set_order(&xas, last, order);
+ xas_create(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ }
+ do {
+ xas_set_range(&xas, first, last);
+ xas_store(&xas, entry);
+ if (xas_error(&xas))
+ goto unlock;
+ first += xas_size(&xas);
+ } while (first <= last);
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, NULL);
+}
+EXPORT_SYMBOL(xa_store_range);
+#endif /* CONFIG_XARRAY_MULTI */
+
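With CONFIG_XARRAY_MULTI enabled, a caller can cover a whole range with one
object; a sketch with a hypothetical struct my_object:

    static int my_map_range(struct xarray *xa, unsigned long first,
                            unsigned long last, struct my_object *obj)
    {
            /* Afterwards, xa_load() on any index in [first, last] returns obj. */
            return xa_err(xa_store_range(xa, first, last, obj, GFP_KERNEL));
    }
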
+/**
+ * __xa_alloc() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+ int err;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!xa_track_free(xa)))
+ return -EINVAL;
+
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ xas.xa_index = *id;
+ xas_find_marked(&xas, max, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ xas_set_err(&xas, -ENOSPC);
+ xas_store(&xas, entry);
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ err = xas_error(&xas);
+ if (!err)
+ *id = xas.xa_index;
+ return err;
+}
+EXPORT_SYMBOL(__xa_alloc);
+
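A sketch of ID allocation built on the function above. It assumes the array
was initialised with the free-tracking flag from the accompanying header
(XA_FLAGS_ALLOC) so that XA_FREE_MARK is maintained:

    static int my_assign_id(struct xarray *xa, struct my_object *obj, u32 *id)
    {
            int err;

            *id = 0;                        /* start searching from the bottom */
            xa_lock(xa);
            err = __xa_alloc(xa, id, UINT_MAX, obj, GFP_KERNEL);
            xa_unlock(xa);
            return err;                     /* 0, -ENOMEM or -ENOSPC */
    }
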
+/**
+ * __xa_set_mark() - Set this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a NULL entry does not succeed.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_set_mark(&xas, mark);
+}
+EXPORT_SYMBOL_GPL(__xa_set_mark);
+
+/**
+ * __xa_clear_mark() - Clear this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_clear_mark(&xas, mark);
+}
+EXPORT_SYMBOL_GPL(__xa_clear_mark);
+
+/**
+ * xa_get_mark() - Inquire whether this mark is set on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * This function uses the RCU read lock, so the result may be out of date
+ * by the time it returns. If you need the result to be stable, use a lock.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: True if the entry at @index has this mark set, false if it doesn't.
+ */
+bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_start(&xas);
+ while (xas_get_mark(&xas, mark)) {
+ if (!xa_is_node(entry))
+ goto found;
+ entry = xas_descend(&xas, xa_to_node(entry));
+ }
+ rcu_read_unlock();
+ return false;
+ found:
+ rcu_read_unlock();
+ return true;
+}
+EXPORT_SYMBOL(xa_get_mark);
+
+/**
+ * xa_set_mark() - Set this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a NULL entry does not succeed.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_set_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_set_mark);
+
+/**
+ * xa_clear_mark() - Clear this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Clearing a mark always succeeds.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_clear_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_clear_mark);
+
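Marks give each present entry a few extra bits of state. A sketch using
XA_MARK_0 as a "dirty" flag:

    static void my_mark_dirty(struct xarray *xa, unsigned long id)
    {
            xa_set_mark(xa, id, XA_MARK_0); /* no effect if @id holds NULL */
    }

    static bool my_is_dirty(struct xarray *xa, unsigned long id)
    {
            return xa_get_mark(xa, id, XA_MARK_0);
    }
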
+/**
+ * xa_find() - Search the XArray for an entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter, and has the lowest
+ * index that is at least @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may not find
+ * entries which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find);
+
+/**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter and has the lowest
+ * index that is above @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may miss entries
+ * which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find_after(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp + 1);
+ void *entry;
+
+ rcu_read_lock();
+ for (;;) {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+ if (xas.xa_shift) {
+ if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
+ continue;
+ } else {
+ if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
+ continue;
+ }
+ if (!xas_retry(&xas, entry))
+ break;
+ }
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find_after);
+
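Together, xa_find() and xa_find_after() give the usual iteration loop; the
xa_for_each() iterator mentioned in the xas_pause() documentation wraps
essentially this pattern. A sketch:

    static void my_visit_all(struct xarray *xa, unsigned long max)
    {
            unsigned long index = 0;
            void *entry;

            for (entry = xa_find(xa, &index, max, XA_PRESENT); entry;
                 entry = xa_find_after(xa, &index, max, XA_PRESENT))
                    pr_info("index %lu -> %p\n", index, entry);
    }
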
+static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each(xas, entry, max) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n, xa_mark_t mark)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each_marked(xas, entry, max, mark) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+/**
+ * xa_extract() - Copy selected entries from the XArray into a normal array.
+ * @xa: The source XArray to copy from.
+ * @dst: The buffer to copy entries into.
+ * @start: The first index in the XArray eligible to be selected.
+ * @max: The last index in the XArray eligible to be selected.
+ * @n: The maximum number of entries to copy.
+ * @filter: Selection criterion.
+ *
+ * Copies up to @n entries that match @filter from the XArray. The
+ * copied entries will have indices between @start and @max, inclusive.
+ *
+ * The @filter may be an XArray mark value, in which case entries which are
+ * marked with that mark will be copied. It may also be %XA_PRESENT, in
+ * which case all entries which are not NULL will be copied.
+ *
+ * The entries returned may not represent a snapshot of the XArray at a
+ * moment in time. For example, if another thread stores to index 5, then
+ * index 10, calling xa_extract() may return the old contents of index 5
+ * and the new contents of index 10. Indices not modified while this
+ * function is running will not be skipped.
+ *
+ * If you need stronger guarantees, holding the xa_lock across calls to this
+ * function will prevent concurrent modification.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The number of entries copied.
+ */
+unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
+ unsigned long max, unsigned int n, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, start);
+
+ if (!n)
+ return 0;
+
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ return xas_extract_marked(&xas, dst, max, n, filter);
+ return xas_extract_present(&xas, dst, max, n);
+}
+EXPORT_SYMBOL(xa_extract);
+
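For example, copying up to 16 entries carrying XA_MARK_0 into a
caller-supplied array; a sketch in which @dst must have room for 16 pointers:

    static unsigned int my_grab_marked(struct xarray *xa, void **dst)
    {
            return xa_extract(xa, dst, 0, ULONG_MAX, 16, XA_MARK_0);
    }
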
+/**
+ * xa_destroy() - Free all internal data structures.
+ * @xa: XArray.
+ *
+ * After calling this function, the XArray is empty and has freed all memory
+ * allocated for its internal data structures. You are responsible for
+ * freeing the objects referenced by the XArray.
+ *
+ * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
+ */
+void xa_destroy(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long flags;
+ void *entry;
+
+ xas.xa_node = NULL;
+ xas_lock_irqsave(&xas, flags);
+ entry = xa_head_locked(xa);
+ RCU_INIT_POINTER(xa->xa_head, NULL);
+ xas_init_marks(&xas);
+ /* lockdep checks we're still holding the lock in xas_free_nodes() */
+ if (xa_is_node(entry))
+ xas_free_nodes(&xas, xa_to_node(entry));
+ xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(xa_destroy);
+
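Since the XArray never frees the objects it points to, a teardown path
typically walks the array first; a sketch assuming the stored entries were
kmalloc()ed by the caller:

    static void my_teardown(struct xarray *xa)
    {
            unsigned long index = 0;
            void *entry;

            for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
                 entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
                    kfree(entry);
            xa_destroy(xa);                 /* frees only the internal nodes */
    }
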
+#ifdef XA_DEBUG
+void xa_dump_node(const struct xa_node *node)
+{
+ unsigned i, j;
+
+ if (!node)
+ return;
+ if ((unsigned long)node & 3) {
+ pr_cont("node %px\n", node);
+ return;
+ }
+
+ pr_cont("node %px %s %d parent %px shift %d count %d values %d "
+ "array %px list %px %px marks",
+ node, node->parent ? "offset" : "max", node->offset,
+ node->parent, node->shift, node->count, node->nr_values,
+ node->array, node->private_list.prev, node->private_list.next);
+ for (i = 0; i < XA_MAX_MARKS; i++)
+ for (j = 0; j < XA_MARK_LONGS; j++)
+ pr_cont(" %lx", node->marks[i][j]);
+ pr_cont("\n");
+}
+
+void xa_dump_index(unsigned long index, unsigned int shift)
+{
+ if (!shift)
+ pr_info("%lu: ", index);
+ else if (shift >= BITS_PER_LONG)
+ pr_info("0-%lu: ", ~0UL);
+ else
+ pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
+}
+
+void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
+{
+ if (!entry)
+ return;
+
+ xa_dump_index(index, shift);
+
+ if (xa_is_node(entry)) {
+ if (shift == 0) {
+ pr_cont("%px\n", entry);
+ } else {
+ unsigned long i;
+ struct xa_node *node = xa_to_node(entry);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ xa_dump_entry(node->slots[i],
+ index + (i << node->shift), node->shift);
+ }
+ } else if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (!xa_is_internal(entry))
+ pr_cont("%px\n", entry);
+ else if (xa_is_retry(entry))
+ pr_cont("retry (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_sibling(entry))
+ pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else
+ pr_cont("UNKNOWN ENTRY (%px)\n", entry);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+ void *entry = xa->xa_head;
+ unsigned int shift = 0;
+
+ pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
+ xa->xa_flags, xa_marked(xa, XA_MARK_0),
+ xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
+ if (xa_is_node(entry))
+ shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
+ xa_dump_entry(entry, 0, shift);
+}
+#endif
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d14fd4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
- const uint32_t poly = 0xEDB88320;
+ const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
# endif
#endif
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
+ /* fall through */
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
+ /* fall through */
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
+ /* fall through */
case TYPEDO:
if (state->last) {
BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
+ /* fall through */
case COPY:
copy = state->length;
if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
+ /* fall through */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
+ /* fall through */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
+ /* fall through */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
+ /* fall through */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
+ /* fall through */
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
+ /* fall through */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
+ /* fall through */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
+ /* fall through */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;