From 74838b75379a53678ffc5f59de86161d21e2c808 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Fri, 27 Jul 2012 20:55:27 -0400 Subject: swiotlb: add the late swiotlb initialization function with iotlb memory This enables the caller to initialize swiotlb with its own iotlb memory late in the bootup. See git commit eb605a5754d050a25a9f00d718fb173f24c486ef "swiotlb: add swiotlb_tbl_map_single library function" which will explain the full details of what it can be used for. CC: FUJITA Tomonori [v1: Fold in smatch warning] Signed-off-by: Konrad Rzeszutek Wilk --- lib/swiotlb.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 45bc1f83a5ad..f114bf6a8e13 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. */ -void __init +static void __init swiotlb_init_with_default_size(size_t default_size, int verbose) { unsigned long bytes; @@ -206,8 +206,9 @@ swiotlb_init(int verbose) int swiotlb_late_init_with_default_size(size_t default_size) { - unsigned long i, bytes, req_nslabs = io_tlb_nslabs; + unsigned long bytes, req_nslabs = io_tlb_nslabs; unsigned int order; + int rc = 0; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); @@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size) order--; } - if (!io_tlb_start) - goto cleanup1; - + if (!io_tlb_start) { + io_tlb_nslabs = req_nslabs; + return -ENOMEM; + } if (order != get_order(bytes)) { printk(KERN_WARNING "Warning: only able to allocate %ld MB " "for software IO TLB\n", (PAGE_SIZE << order) >> 20); io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = io_tlb_nslabs << IO_TLB_SHIFT; } + rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs); + if (rc) + free_pages((unsigned long)io_tlb_start, order); + return rc; +} + +int +swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) +{ + unsigned long i, bytes; + + bytes = nslabs << IO_TLB_SHIFT; + + io_tlb_nslabs = nslabs; + io_tlb_start = tlb; io_tlb_end = io_tlb_start + bytes; + memset(io_tlb_start, 0, bytes); /* @@ -288,10 +305,8 @@ cleanup3: io_tlb_list = NULL; cleanup2: io_tlb_end = NULL; - free_pages((unsigned long)io_tlb_start, order); io_tlb_start = NULL; -cleanup1: - io_tlb_nslabs = req_nslabs; + io_tlb_nslabs = 0; return -ENOMEM; } -- cgit From 9eca2eb9942ee0b6f2bc76485e16e334aee2c0bf Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sat, 25 Aug 2012 22:47:57 +0000 Subject: netlink: add minlen validation for the new signed types Signed-off-by: Julian Anastasov Signed-off-by: David S. 
Miller --- lib/nlattr.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'lib') diff --git a/lib/nlattr.c b/lib/nlattr.c index 4226dfeb5178..18eca7809b08 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_U64] = sizeof(u64), [NLA_MSECS] = sizeof(u64), [NLA_NESTED] = NLA_HDRLEN, + [NLA_S8] = sizeof(s8), + [NLA_S16] = sizeof(s16), + [NLA_S32] = sizeof(s32), + [NLA_S64] = sizeof(s64), }; static int validate_nla(const struct nlattr *nla, int maxtype, -- cgit From 9785e10aedfa0fad5c1aac709dce5ada1b123783 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 8 Sep 2012 02:53:53 +0000 Subject: netlink: kill netlink_set_nonroot Replace netlink_set_nonroot by one new field `flags' in struct netlink_kernel_cfg that is passed to netlink_kernel_create. This patch also renames NL_NONROOT_* to NL_CFG_F_NONROOT_* since now the flags field in nl_table is generic (so we can add more flags if needed in the future). Also adjust all callers in the net-next tree to use these flags instead of netlink_set_nonroot. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- lib/kobject_uevent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 0401d2916d9f..c2e97787d01e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -375,6 +375,7 @@ static int uevent_net_init(struct net *net) struct uevent_sock *ue_sk; struct netlink_kernel_cfg cfg = { .groups = 1, + .flags = NL_CFG_F_NONROOT_RECV, }; ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); @@ -422,7 +423,6 @@ static struct pernet_operations uevent_net_ops = { static int __init kobject_uevent_init(void) { - netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); return register_pernet_subsys(&uevent_net_ops); } -- cgit From 9f00d9776bc5beb92e8bfc884a7e96ddc5589e2e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 8 Sep 2012 02:53:54 +0000 Subject: netlink: hide struct module parameter in netlink_kernel_create This patch defines netlink_kernel_create as a wrapper function of __netlink_kernel_create to hide the struct module *me parameter (which seems to be THIS_MODULE in all existing netlink subsystems). Suggested by David S. Miller. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- lib/kobject_uevent.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index c2e97787d01e..52e5abbc41db 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -382,8 +382,7 @@ static int uevent_net_init(struct net *net) if (!ue_sk) return -ENOMEM; - ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, - THIS_MODULE, &cfg); + ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); if (!ue_sk->sk) { printk(KERN_ERR "kobject_uevent: unable to create netlink socket!\n"); -- cgit From b53d657d84f530e5d83f34ff1b81ceedad3faa31 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 7 Sep 2012 14:31:45 +0200 Subject: usb/core: use bin2bcd() for bcdDevice in RH The kernel's version number is used as decimal in the bcdDevice field of the RH descriptor. For kernel version v3.12 we would see 3.0c in lsusb. I am not sure how important it is to stick with bcd values since this is this way since we started git history and nobody complained (however back then we reported only 2.6). 
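To make the encoding difference concrete, here is a minimal userspace sketch (an editor's illustration with assumed version numbers, not code from the patch) reusing the helper from lib/bcd.c:

	#include <stdio.h>

	/* same arithmetic as bin2bcd() in lib/bcd.c */
	static unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) + val % 10;
	}

	int main(void)
	{
		unsigned major = 3, minor = 12;	/* kernel v3.12 */

		/* raw binary packing: 0x030c, which lsusb renders as "3.0c" */
		printf("raw: %04x\n", (major << 8) | minor);
		/* BCD packing: 0x0312, rendered as "3.12" */
		printf("bcd: %04x\n", (bin2bcd(major) << 8) | bin2bcd(minor));
		return 0;
	}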
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Greg Kroah-Hartman
---
 lib/bcd.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
(limited to 'lib')

diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf742346..40d304efe272 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
 #include
 #include

-unsigned bcd2bin(unsigned char val)
+unsigned _bcd2bin(unsigned char val)
 {
 	return (val & 0x0f) + (val >> 4) * 10;
 }
-EXPORT_SYMBOL(bcd2bin);
+EXPORT_SYMBOL(_bcd2bin);

-unsigned char bin2bcd(unsigned val)
+unsigned char _bin2bcd(unsigned val)
 {
 	return ((val / 10) << 4) + val % 10;
 }
-EXPORT_SYMBOL(bin2bcd);
+EXPORT_SYMBOL(_bin2bcd);
--
cgit

From bc01637a80f5b670bd70a0279d3f93fa8de1c96d Mon Sep 17 00:00:00 2001
From: Dmitry Kasatkin
Date: Wed, 12 Sep 2012 13:26:55 +0300
Subject: digsig: add hash size comparison on signature verification

When pkcs_1_v1_5_decode_emsa() returns without error and hash sizes do
not match, the hash comparison is not done and digsig_verify_rsa()
returns no error. This is a bug and this patch fixes it. The bug was
introduced in v3.3 by commit b35e286a640f ("lib/digsig:
pkcs_1_v1_5_decode_emsa cleanup").

Cc: stable@vger.kernel.org
Signed-off-by: Dmitry Kasatkin
Signed-off-by: Linus Torvalds
---
 lib/digsig.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
(limited to 'lib')

diff --git a/lib/digsig.c b/lib/digsig.c
index 286d558033e2..8c0e62975c88 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
 	memcpy(out1 + head, p, l);

 	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
+	if (err)
+		goto err;

-	if (!err && len == hlen)
-		err = memcmp(out2, h, hlen);
+	if (len != hlen || memcmp(out2, h, hlen))
+		err = -EINVAL;

 err:
 	mpi_free(in);
--
cgit

From 8c2c3df31e3b87cb5348e48776c366ebd1dc5a7a Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 20 Apr 2012 14:45:54 +0100
Subject: arm64: Build infrastructure

This patch adds Makefile and Kconfig files required for building an
AArch64 kernel.
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Acked-by: Tony Lindgren Acked-by: Nicolas Pitre Acked-by: Olof Johansson Acked-by: Santosh Shilimkar Acked-by: Arnd Bergmann --- lib/Kconfig.debug | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2403a63b5da5..cfb4578ac2f7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -452,7 +452,8 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" depends on DEBUG_KERNEL && EXPERIMENTAL && \ - (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) + (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || \ + MICROBLAZE || TILE || ARM64) select DEBUG_FS select STACKTRACE if STACKTRACE_SUPPORT @@ -739,7 +740,8 @@ config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT depends on BUG depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ - FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE + FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || \ + TILE || ARM64 default y help Say Y here to make BUG() panics output the file name and line number -- cgit From 798efc60e4276825df34af0e91ecbe0781237834 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Sep 2012 20:11:29 -0700 Subject: dev_dbg/dynamic_debug: Update to use printk_emit, optimize stack commit c4e00daaa9 ("driver-core: extend dev_printk() to pass structured data") changed __dev_printk and broke dynamic-debug's ability to control the dynamic prefix of dev_dbg(dev,..). commit af7f2158fd ("drivers-core: make structured logging play nice with dynamic-debug") made a minimal correction. The current dynamic debug code uses up to 3 recursion levels via %pV. This can consume quite a bit of stack. Directly call printk_emit to reduce the recursion depth. These changes include: dev_dbg: o Create and use function create_syslog_header to format the syslog header for printk_emit uses. o Call create_syslog_header and neaten __dev_printk o Make __dev_printk static not global o Remove include header declaration of __dev_printk o Remove now unused EXPORT_SYMBOL() of __dev_printk o Whitespace neatening dynamic_dev_dbg: o Remove KERN_DEBUG from dynamic_emit_prefix o Call create_syslog_header and printk_emit o Whitespace neatening Signed-off-by: Joe Perches Acked-by: David S. 
Miller Tested-by: Jim Cromie Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 7ca29a0a3019..29ff2e4cfb75 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf) int pos_after_tid; int pos = 0; - pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG); + *buf = '\0'; + if (desc->flags & _DPRINTK_FLAGS_INCL_TID) { if (in_interrupt()) - pos += snprintf(buf + pos, remaining(pos), "%s ", - ""); + pos += snprintf(buf + pos, remaining(pos), " "); else pos += snprintf(buf + pos, remaining(pos), "[%d] ", - task_pid_vnr(current)); + task_pid_vnr(current)); } pos_after_tid = pos; if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME) pos += snprintf(buf + pos, remaining(pos), "%s:", - desc->modname); + desc->modname); if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) pos += snprintf(buf + pos, remaining(pos), "%s:", - desc->function); + desc->function); if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO) pos += snprintf(buf + pos, remaining(pos), "%d:", - desc->lineno); + desc->lineno); if (pos - pos_after_tid) pos += snprintf(buf + pos, remaining(pos), " "); if (pos >= PREFIX_SIZE) @@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) BUG_ON(!fmt); va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf); + + res = printk(KERN_DEBUG "%s%pV", + dynamic_emit_prefix(descriptor, buf), &vaf); + va_end(args); return res; @@ -574,15 +578,30 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor, struct va_format vaf; va_list args; int res; - char buf[PREFIX_SIZE]; BUG_ON(!descriptor); BUG_ON(!fmt); va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf); + + if (!dev) { + res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf); + } else { + char buf[PREFIX_SIZE]; + char dict[128]; + size_t dictlen; + + dictlen = create_syslog_header(dev, dict, sizeof(dict)); + + res = printk_emit(0, 7, dictlen ? dict : NULL, dictlen, + "%s%s %s: %pV", + dynamic_emit_prefix(descriptor, buf), + dev_driver_string(dev), dev_name(dev), &vaf); + } + va_end(args); return res; -- cgit From b004ff4972e2a42aa4512c90cc6a9e4dc1bb36b6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Sep 2012 20:12:19 -0700 Subject: netdev_printk/dynamic_netdev_dbg: Directly call printk_emit A lot of stack is used in recursive printks with %pV. Using multiple levels of %pV (a logging function with %pV that calls another logging function with %pV) can consume more stack than necessary. Avoid excessive stack use by not calling dev_printk from netdev_printk and dynamic_netdev_dbg. Duplicate the logic and form of dev_printk instead. Make __netdev_printk static. Remove EXPORT_SYMBOL(__netdev_printk) Whitespace and brace style neatening. Signed-off-by: Joe Perches Acked-by: David S. 
Miller Tested-by: Jim Cromie Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 29ff2e4cfb75..2a29f4e04bdf 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -611,20 +611,40 @@ EXPORT_SYMBOL(__dynamic_dev_dbg); #ifdef CONFIG_NET int __dynamic_netdev_dbg(struct _ddebug *descriptor, - const struct net_device *dev, const char *fmt, ...) + const struct net_device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; int res; - char buf[PREFIX_SIZE]; BUG_ON(!descriptor); BUG_ON(!fmt); va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf); + + if (dev && dev->dev.parent) { + char buf[PREFIX_SIZE]; + char dict[128]; + size_t dictlen; + + dictlen = create_syslog_header(dev->dev.parent, + dict, sizeof(dict)); + + res = printk_emit(0, 7, dictlen ? dict : NULL, dictlen, + "%s%s %s: %s: %pV", + dynamic_emit_prefix(descriptor, buf), + dev_driver_string(dev->dev.parent), + dev_name(dev->dev.parent), + netdev_name(dev), &vaf); + } else if (dev) { + res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf); + } else { + res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); + } + va_end(args); return res; -- cgit From c2c5a7051c556036b7beb8f4a89eefdc91c3245b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Sep 2012 20:13:01 -0700 Subject: netdev_printk/netif_printk: Remove a superfluous logging colon netdev_printk originally called dev_printk with %pV. This style emitted the complete dev_printk header with a colon followed by the netdev_name prefix followed by a colon. Now that netdev_printk does not call dev_printk, the extra colon is superfluous. Remove it. Example: old: sky2 0000:02:00.0: eth0: Link is up at 100 Mbps, full duplex, flow control both new: sky2 0000:02:00.0 eth0: Link is up at 100 Mbps, full duplex, flow control both Signed-off-by: Joe Perches Acked-by: David S. Miller Tested-by: Jim Cromie Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 2a29f4e04bdf..6b3ebabacfa8 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -634,7 +634,7 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor, dict, sizeof(dict)); res = printk_emit(0, 7, dictlen ? dict : NULL, dictlen, - "%s%s %s: %s: %pV", + "%s%s %s %s: %pV", dynamic_emit_prefix(descriptor, buf), dev_driver_string(dev->dev.parent), dev_name(dev->dev.parent), -- cgit From 666f355f3805d68b6ed5f7013806f1f65abfbf03 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Sep 2012 20:14:11 -0700 Subject: device and dynamic_debug: Use dev_vprintk_emit and dev_printk_emit Convert direct calls of vprintk_emit and printk_emit to the dev_ equivalents. Make create_syslog_header static. Signed-off-by: Joe Perches Acked-by: David S. 
Miller Tested-by: Jim Cromie Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 6b3ebabacfa8..e7f7d993357a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -591,15 +591,11 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor, res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf); } else { char buf[PREFIX_SIZE]; - char dict[128]; - size_t dictlen; - dictlen = create_syslog_header(dev, dict, sizeof(dict)); - - res = printk_emit(0, 7, dictlen ? dict : NULL, dictlen, - "%s%s %s: %pV", - dynamic_emit_prefix(descriptor, buf), - dev_driver_string(dev), dev_name(dev), &vaf); + res = dev_printk_emit(7, dev, "%s%s %s: %pV", + dynamic_emit_prefix(descriptor, buf), + dev_driver_string(dev), dev_name(dev), + &vaf); } va_end(args); @@ -627,18 +623,13 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor, if (dev && dev->dev.parent) { char buf[PREFIX_SIZE]; - char dict[128]; - size_t dictlen; - - dictlen = create_syslog_header(dev->dev.parent, - dict, sizeof(dict)); - - res = printk_emit(0, 7, dictlen ? dict : NULL, dictlen, - "%s%s %s %s: %pV", - dynamic_emit_prefix(descriptor, buf), - dev_driver_string(dev->dev.parent), - dev_name(dev->dev.parent), - netdev_name(dev), &vaf); + + res = dev_printk_emit(7, dev->dev.parent, + "%s%s %s %s: %pV", + dynamic_emit_prefix(descriptor, buf), + dev_driver_string(dev->dev.parent), + dev_name(dev->dev.parent), + netdev_name(dev), &vaf); } else if (dev) { res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf); } else { -- cgit From e3ebfb96f396731ca2d0b108785d5da31b53ab00 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 2 Jul 2012 14:42:01 -0700 Subject: rcu: Add PROVE_RCU_DELAY to provoke difficult races There have been some recent bugs that were triggered only when preemptible RCU's __rcu_read_unlock() was preempted just after setting ->rcu_read_lock_nesting to INT_MIN, which is a low-probability event. Therefore, reproducing those bugs (to say nothing of gaining confidence in alleged fixes) was quite difficult. This commit therefore creates a new debug-only RCU kernel config option that forces a short delay in __rcu_read_unlock() to increase the probability of those sorts of bugs occurring. Signed-off-by: Paul E. McKenney Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- lib/Kconfig.debug | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2403a63b5da5..dacbbe4d7a80 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -629,6 +629,20 @@ config PROVE_RCU_REPEATEDLY Say N if you are unsure. +config PROVE_RCU_DELAY + bool "RCU debugging: preemptible RCU race provocation" + depends on DEBUG_KERNEL && PREEMPT_RCU + default n + help + There is a class of races that involve an unlikely preemption + of __rcu_read_unlock() just after ->rcu_read_lock_nesting has + been set to INT_MIN. This feature inserts a delay at that + point to increase the probability of these races. + + Say Y to increase probability of preemption of __rcu_read_unlock(). + + Say N if you are unsure. 
+ config SPARSE_RCU_POINTER bool "RCU debugging: sparse-based checks for pointer usage" default n -- cgit From b5bd6a0e5fa8c0376d9746c566fe3daaa51ec825 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 24 Sep 2012 17:17:35 -0700 Subject: lib/flex_proportions.c: fix corruption of denominator in flexible proportions When racing with CPU hotplug, percpu_counter_sum() can return negative values for the number of observed events. This confuses fprop_new_period(), which uses unsigned type and as a result number of events is set to big *positive* number. From that moment on, things go pear shaped and can result e.g. in division by zero as denominator is later truncated to 32-bits. This bug causes a divide-by-zero oops in bdi_dirty_limit() in Borislav's 3.6.0-rc6 based kernel. Fix the issue by using a signed type in fprop_new_period(). That makes us bail out from the function without doing anything (mistakenly) thinking there are no events to age. That makes aging somewhat inaccurate but getting accurate data would be rather hard. Signed-off-by: Jan Kara Reported-by: Borislav Petkov Reported-by: Srivatsa S. Bhat Cc: Wu Fengguang Cc: Peter Zijlstra Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/flex_proportions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index c785554f9523..ebf3bac460b0 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p) */ bool fprop_new_period(struct fprop_global *p, int periods) { - u64 events; + s64 events; unsigned long flags; local_irq_save(flags); -- cgit From 759643ce39f13c416236e2609ccaed9792107b2f Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Mon, 1 Oct 2012 12:48:31 -0600 Subject: dma-debug: Remove local BUS_NOTIFY_UNBOUND_DRIVER define Remove local BUS_NOTIFY_UNBOUND_DRIVER define. This is not used since BUS_NOTIFY_UNBOUND_DRIVER is defined in include/linux/device.h Signed-off-by: Shuah Khan Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 66ce41489133..b9087bff008b 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page", static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_NONE" }; -/* little merge helper - remove it after the merge window */ -#ifndef BUS_NOTIFY_UNBOUND_DRIVER -#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 -#endif - /* * The access to some variables in this macro is racy. We can't use atomic_t * here because all these variables are exported to debugfs. Some of them even -- cgit From 8f243af42adef5f589b8e39656284ca9c9374e44 Mon Sep 17 00:00:00 2001 From: Joe Mario Date: Thu, 4 Oct 2012 17:12:15 -0700 Subject: sections: fix const sections for crc32 table Fix the const sections for the code generated by crc32 table. There's no ro version of the cacheline aligned section, so we cannot put in const data without a conflict Just don't make the crc tables const for now. 
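The conflict is easy to reproduce in isolation; a hedged two-object illustration (my reconstruction of the failure mode, not code from the patch):

	#include <linux/cache.h>
	#include <linux/types.h>

	/* __cacheline_aligned places objects in the writable section
	 * .data..cacheline_aligned, and that section has no read-only twin;
	 * asking the compiler to put a const object there alongside a
	 * non-const one typically fails at build time with
	 * "causes a section type conflict".
	 */
	static u32 __cacheline_aligned writable_tbl[256];		/* fine */
	static const u32 __cacheline_aligned readonly_tbl[256] = { 1 };	/* conflict */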
[ak@linux.intel.com: some fixes and new description] [akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: Joe Mario Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/crc32.c | 9 ++++++--- lib/gen_crc32table.c | 6 +++--- 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/crc32.c b/lib/crc32.c index 61774b8db4de..072fbd8234d5 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) #else u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { - return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE); + return crc32_le_generic(crc, p, len, + (const u32 (*)[256])crc32table_le, CRCPOLY_LE); } u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) { - return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); + return crc32_le_generic(crc, p, len, + (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); } #endif EXPORT_SYMBOL(crc32_le); @@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) #else u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { - return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE); + return crc32_be_generic(crc, p, len, + (const u32 (*)[256])crc32table_be, CRCPOLY_BE); } #endif EXPORT_SYMBOL(crc32_be); diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c index 8f8d5439e2d9..71fcfcd96410 100644 --- a/lib/gen_crc32table.c +++ b/lib/gen_crc32table.c @@ -109,7 +109,7 @@ int main(int argc, char** argv) if (CRC_LE_BITS > 1) { crc32init_le(); - printf("static const u32 __cacheline_aligned " + printf("static u32 __cacheline_aligned " "crc32table_le[%d][%d] = {", LE_TABLE_ROWS, LE_TABLE_SIZE); output_table(crc32table_le, LE_TABLE_ROWS, @@ -119,7 +119,7 @@ int main(int argc, char** argv) if (CRC_BE_BITS > 1) { crc32init_be(); - printf("static const u32 __cacheline_aligned " + printf("static u32 __cacheline_aligned " "crc32table_be[%d][%d] = {", BE_TABLE_ROWS, BE_TABLE_SIZE); output_table(crc32table_be, LE_TABLE_ROWS, @@ -128,7 +128,7 @@ int main(int argc, char** argv) } if (CRC_LE_BITS > 1) { crc32cinit_le(); - printf("static const u32 __cacheline_aligned " + printf("static u32 __cacheline_aligned " "crc32ctable_le[%d][%d] = {", LE_TABLE_ROWS, LE_TABLE_SIZE); output_table(crc32ctable_le, LE_TABLE_ROWS, -- cgit From e49317d415f5a44bad8377a208d61902d752303e Mon Sep 17 00:00:00 2001 From: George Spelvin Date: Thu, 4 Oct 2012 17:12:27 -0700 Subject: lib: vsprintf: optimize division by 10 for small integers Shrink the reciprocal approximations used in put_dec_full4() based on the comments in put_dec_full9(). 
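Since the valid ranges of such reciprocal constants are easy to get wrong, a throwaway userspace check may help (an editor's harness, not part of the patch; the bounds in the comments are the usual ones quoted for these constants):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t q;

		/* (q * 0xccd) >> 15 == q / 10, exact for q <= 16388;
		 * put_dec_full4() only ever feeds it q <= 9999 */
		for (q = 0; q <= 9999; q++)
			assert(((q * 0xccd) >> 15) == q / 10);

		/* (q * 0xcd) >> 11 == q / 10, exact for q <= 1028;
		 * by this point the argument is already below 1000 */
		for (q = 0; q <= 999; q++)
			assert(((q * 0xcd) >> 11) == q / 10);
		return 0;
	}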
Signed-off-by: George Spelvin Cc: Denys Vlasenko Cc: Michal Nazarewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0e337541f005..67e74cbefa90 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -243,13 +243,14 @@ char *put_dec(char *buf, unsigned long long n) /* Second algorithm: valid only for 64-bit long longs */ +/* See comment in put_dec_full9 for choice of constants */ static noinline_for_stack char *put_dec_full4(char *buf, unsigned q) { unsigned r; - r = (q * 0xcccd) >> 19; + r = (q * 0xccd) >> 15; *buf++ = (q - 10 * r) + '0'; - q = (r * 0x199a) >> 16; + q = (r * 0xcd) >> 11; *buf++ = (r - 10 * q) + '0'; r = (q * 0xcd) >> 11; *buf++ = (q - 10 * r) + '0'; -- cgit From 2359172a75986359ce9cf041a9aca6a32cdf8779 Mon Sep 17 00:00:00 2001 From: George Spelvin Date: Thu, 4 Oct 2012 17:12:29 -0700 Subject: lib: vsprintf: optimize division by 10000 The same multiply-by-inverse technique can be used to convert division by 10000 to a 32x32->64-bit multiply. Signed-off-by: George Spelvin Cc: Denys Vlasenko Cc: Michal Nazarewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 60 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 27 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 67e74cbefa90..8cb7635b2ce3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -245,17 +245,32 @@ char *put_dec(char *buf, unsigned long long n) /* See comment in put_dec_full9 for choice of constants */ static noinline_for_stack -char *put_dec_full4(char *buf, unsigned q) +void put_dec_full4(char *buf, unsigned q) { unsigned r; r = (q * 0xccd) >> 15; - *buf++ = (q - 10 * r) + '0'; + buf[0] = (q - 10 * r) + '0'; q = (r * 0xcd) >> 11; - *buf++ = (r - 10 * q) + '0'; + buf[1] = (r - 10 * q) + '0'; r = (q * 0xcd) >> 11; - *buf++ = (q - 10 * r) + '0'; - *buf++ = r + '0'; - return buf; + buf[2] = (q - 10 * r) + '0'; + buf[3] = r + '0'; +} + +/* + * Call put_dec_full4 on x % 10000, return x / 10000. + * The approximation x/10000 == (x * 0x346DC5D7) >> 43 + * holds for all x < 1,128,869,999. The largest value this + * helper will ever be asked to convert is 1,125,520,955. + * (d1 in the put_dec code, assuming n is all-ones). + */ +static +unsigned put_dec_helper4(char *buf, unsigned x) +{ + uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43; + + put_dec_full4(buf, x - q * 10000); + return q; } /* Based on code by Douglas W. 
Jones found at
@@ -277,28 +292,19 @@ char *put_dec(char *buf, unsigned long long n)
 	d3 = (h >> 16); /* implicit "& 0xffff" */

 	q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+	q = put_dec_helper4(buf, q);
+
+	q += 7671 * d3 + 9496 * d2 + 6 * d1;
+	q = put_dec_helper4(buf+4, q);
+
+	q += 4749 * d3 + 42 * d2;
+	q = put_dec_helper4(buf+8, q);

-	buf = put_dec_full4(buf, q % 10000);
-	q = q / 10000;
-
-	d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1;
-	buf = put_dec_full4(buf, d1 % 10000);
-	q = d1 / 10000;
-
-	d2 = q + 4749 * d3 + 42 * d2;
-	buf = put_dec_full4(buf, d2 % 10000);
-	q = d2 / 10000;
-
-	d3 = q + 281 * d3;
-	if (!d3)
-		goto done;
-	buf = put_dec_full4(buf, d3 % 10000);
-	q = d3 / 10000;
-	if (!q)
-		goto done;
-	buf = put_dec_full4(buf, q);
- done:
-	while (buf[-1] == '0')
+	q += 281 * d3;
+	buf += 12;
+	if (q)
+		buf = put_dec_trunc8(buf, q);
+	else while (buf[-1] == '0')
 		--buf;

 	return buf;
--
cgit

From cb239d0a97d573150d6106a92c0641da0d03f6a1 Mon Sep 17 00:00:00 2001
From: George Spelvin
Date: Thu, 4 Oct 2012 17:12:30 -0700
Subject: lib: vsprintf: optimize put_dec_trunc8()

If you're going to have a conditional branch after each 32x32->64-bit
multiply, might as well shrink the code and make it a loop. This also
avoids using the long multiply for small integers.

(This leaves the comments in a confusing state, but that's a separate
patch to make review easier.)

Signed-off-by: George Spelvin
Cc: Denys Vlasenko
Cc: Michal Nazarewicz
Cc: Rabin Vincent
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/vsprintf.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)
(limited to 'lib')

diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 8cb7635b2ce3..c2236f14640f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,22 +174,12 @@ char *put_dec_trunc8(char *buf, unsigned r)
 	unsigned q;

 	/* Copy of previous function's body with added early returns */
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 2 */
-	if (q == 0)
-		return buf;
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 3 */
-	if (r == 0)
-		return buf;
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 4 */
-	if (q == 0)
-		return buf;
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 5 */
-	if (r == 0)
-		return buf;
+	while (r >= 10000) {
+		q = r + '0';
+		r = (r * (uint64_t)0x1999999a) >> 32;
+		*buf++ = q - 10*r;
+	}
+
 	q = (r * 0x199a) >> 16;
 	*buf++ = (r - 10 * q) + '0'; /* 6 */
 	if (q == 0)
--
cgit

From f40005165f7f0bda6cc268bdbcaad98a8f26fb1a Mon Sep 17 00:00:00 2001
From: George Spelvin
Date: Thu, 4 Oct 2012 17:12:32 -0700
Subject: lib: vsprintf: fix broken comments

Numbering the 8 potential digits 2 through 9 never did make a lot of
sense.
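The range claimed for put_dec_helper4()'s constant above can be machine-checked the same way; an exhaustive sketch (mine, not from the patches; runs in a few seconds):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t x;

		/* x/10000 == (x * 0x346DC5D7) >> 43 for all x < 1,128,869,999;
		 * 1,125,520,955 is the largest value put_dec() passes in */
		for (x = 0; x < 1128869999; x++)
			assert((uint32_t)((x * (uint64_t)0x346DC5D7) >> 43) == x / 10000);
		return 0;
	}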
Signed-off-by: George Spelvin Cc: Denys Vlasenko Cc: Michal Nazarewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c2236f14640f..852f89f590a6 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -180,19 +180,19 @@ char *put_dec_trunc8(char *buf, unsigned r) *buf++ = q - 10*r; } - q = (r * 0x199a) >> 16; - *buf++ = (r - 10 * q) + '0'; /* 6 */ + q = (r * 0x199a) >> 16; /* r <= 9999 */ + *buf++ = (r - 10 * q) + '0'; if (q == 0) return buf; - r = (q * 0xcd) >> 11; - *buf++ = (q - 10 * r) + '0'; /* 7 */ + r = (q * 0xcd) >> 11; /* q <= 999 */ + *buf++ = (q - 10 * r) + '0'; if (r == 0) return buf; - q = (r * 0xcd) >> 11; - *buf++ = (r - 10 * q) + '0'; /* 8 */ + q = (r * 0xcd) >> 11; /* r <= 99 */ + *buf++ = (r - 10 * q) + '0'; if (q == 0) return buf; - *buf++ = q + '0'; /* 9 */ + *buf++ = q + '0'; /* q <= 9 */ return buf; } -- cgit From 7c59154e7548429ff80384803577176466d2ab9a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 4 Oct 2012 17:12:33 -0700 Subject: lib/vsprintf: update documentation to cover all of %p[Mm][FR] Acked-by: Andrei Emeltchenko Signed-off-by: Andy Shevchenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 852f89f590a6..9287e2549936 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -987,7 +987,7 @@ int kptr_restrict __read_mostly; * - 'm' For a 6-byte MAC address, it prints the hex address without colons * - 'MF' For a 6-byte MAC FDDI address, it prints the address * with a dash-separated hex notation - * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth) + * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth) * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's @@ -1338,7 +1338,10 @@ qualifier: * %pR output the address range in a struct resource with decoded flags * %pr output the address range in a struct resource with raw flags * %pM output a 6-byte MAC address with colons + * %pMR output a 6-byte MAC address with colons in reversed order + * %pMF output a 6-byte MAC address with dashes * %pm output a 6-byte MAC address without colons + * %pmR output a 6-byte MAC address without colons in reversed order * %pI4 print an IPv4 address without leading zeros * %pi4 print an IPv4 address with leading zeros * %pI6 print an IPv6 address with colons -- cgit From 125c4c706b680c7831f0966ff873c1ad0354ec25 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Thu, 4 Oct 2012 17:13:15 -0700 Subject: idr: rename MAX_LEVEL to MAX_IDR_LEVEL To avoid name conflicts: drivers/video/riva/fbdev.c:281:9: sparse: preprocessor token MAX_LEVEL redefined While at it, also make the other names more consistent and add parentheses. 
[akpm@linux-foundation.org: repair fallout] [sfr@canb.auug.org.au: IB/mlx4: fix for MAX_ID_MASK to MAX_IDR_MASK name change] Signed-off-by: Fengguang Wu Cc: Bernd Petrovitsch Cc: walter harms Cc: Glauber Costa Signed-off-by: Stephen Rothwell Cc: Roland Dreier Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/idr.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'lib') diff --git a/lib/idr.c b/lib/idr.c index 4046e29c0a99..648239079dd2 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -20,7 +20,7 @@ * that id to this code and it returns your pointer. * You can release ids at any time. When all ids are released, most of - * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we + * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we * don't need to go to the memory "store" during an id allocate, just * so you don't need to be too concerned about locking and conflicts * with the slab allocator. @@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) */ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) { - while (idp->id_free_cnt < IDR_FREE_MAX) { + while (idp->id_free_cnt < MAX_IDR_FREE) { struct idr_layer *new; new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); if (new == NULL) @@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) sh = IDR_BITS*l; id = ((id >> sh) ^ n ^ m) << sh; } - if ((id >= MAX_ID_BIT) || (id < 0)) + if ((id >= MAX_IDR_BIT) || (id < 0)) return IDR_NOMORE_SPACE; if (l == 0) break; @@ -223,7 +223,7 @@ build_up: * Add a new layer to the top of the tree if the requested * id is larger than the currently allocated space. */ - while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { + while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { layers++; if (!p->count) { /* special case: if the tree is currently empty, @@ -265,7 +265,7 @@ build_up: static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) { - struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL]; int id; id = idr_get_empty_slot(idp, starting_id, pa); @@ -357,7 +357,7 @@ static void idr_remove_warning(int id) static void sub_remove(struct idr *idp, int shift, int id) { struct idr_layer *p = idp->top; - struct idr_layer **pa[MAX_LEVEL]; + struct idr_layer **pa[MAX_IDR_LEVEL]; struct idr_layer ***paa = &pa[0]; struct idr_layer *to_free; int n; @@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id) struct idr_layer *to_free; /* Mask off upper bits we don't use for the search. */ - id &= MAX_ID_MASK; + id &= MAX_IDR_MASK; sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); if (idp->top && idp->top->count == 1 && (idp->layers > 1) && @@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id) to_free->bitmap = to_free->count = 0; free_layer(to_free); } - while (idp->id_free_cnt >= IDR_FREE_MAX) { + while (idp->id_free_cnt >= MAX_IDR_FREE) { p = get_from_free_list(idp); /* * Note: we don't call the rcu callback here, since the only @@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp) int n, id, max; int bt_mask; struct idr_layer *p; - struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; @@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id) n = (p->layer+1) * IDR_BITS; /* Mask off upper bits we don't use for the search. 
*/ - id &= MAX_ID_MASK; + id &= MAX_IDR_MASK; if (id >= (1 << n)) return NULL; @@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp, { int n, id, max, error = 0; struct idr_layer *p; - struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; @@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each); */ void *idr_get_next(struct idr *idp, int *nextidp) { - struct idr_layer *p, *pa[MAX_LEVEL]; + struct idr_layer *p, *pa[MAX_IDR_LEVEL]; struct idr_layer **paa = &pa[0]; int id = *nextidp; int n, max; @@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) n = (p->layer+1) * IDR_BITS; - id &= MAX_ID_MASK; + id &= MAX_IDR_MASK; if (id >= (1 << n)) return ERR_PTR(-EINVAL); @@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get); */ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) { - struct idr_layer *pa[MAX_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL]; struct ida_bitmap *bitmap; unsigned long flags; int idr_id = starting_id / IDA_BITMAP_BITS; @@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) if (t < 0) return _idr_rc_to_errno(t); - if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) + if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) return -ENOSPC; if (t != idr_id) @@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) } id = idr_id * IDA_BITMAP_BITS + t; - if (id >= MAX_ID_BIT) + if (id >= MAX_IDR_BIT) return -ENOSPC; __set_bit(t, bitmap->bitmap); -- cgit From 77dd3b0bd17a0849b2f98b915ce3fc9247db1013 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 4 Oct 2012 17:13:16 -0700 Subject: lib/parser.c: avoid overflow in match_number() The result of converting an integer value to another signed integer type that's unable to represent the original value is implementation defined. (See notes in section 6.3.1.3 of the C standard.) In match_number(), the result of simple_strtol() (which returns type long) is assigned to a value of type int. Instead, handle the result of simple_strtol() in a well-defined way, and return -ERANGE if the result won't fit in the int variable used to hold the parsed result. No current callers pay attention to the particular error value returned, so this additional return code shouldn't do any harm. [akpm@linux-foundation.org: coding-style tweaks] Signed-off-by: Alex Elder Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/parser.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/parser.c b/lib/parser.c index c43410084838..52cfa69f73df 100644 --- a/lib/parser.c +++ b/lib/parser.c @@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[]) * * Description: Given a &substring_t and a base, attempts to parse the substring * as a number in that base. On success, sets @result to the integer represented - * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. + * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure. 
 */
 static int match_number(substring_t *s, int *result, int base)
 {
 	char *endp;
 	char *buf;
 	int ret;
+	long val;
 	size_t len = s->to - s->from;

 	buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
 		return -ENOMEM;
 	memcpy(buf, s->from, len);
 	buf[len] = '\0';
-	*result = simple_strtol(buf, &endp, base);
+	ret = 0;
+	val = simple_strtol(buf, &endp, base);
 	if (endp == buf)
 		ret = -EINVAL;
+	else if (val < (long)INT_MIN || val > (long)INT_MAX)
+		ret = -ERANGE;
+	else
+		*result = (int) val;
 	kfree(buf);
 	return ret;
 }
--
cgit

From e96875677fb2b7cb739c5d7769824dff7260d31d Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 4 Oct 2012 17:13:18 -0700
Subject: lib/gcd.c: prevent possible div by 0

Account for all properties when a and/or b are 0:

  gcd(0, 0) = 0
  gcd(a, 0) = a
  gcd(0, b) = b

Fixes no known problems in current kernels.

Signed-off-by: Davidlohr Bueso
Cc: Eric Dumazet
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/gcd.c | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'lib')

diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)

 	if (a < b)
 		swap(a, b);
+
+	if (!b)
+		return a;
 	while ((r = a % b) != 0) {
 		a = b;
 		b = r;
--
cgit

From ca279cf1065fb689abea1dc7d8c11787729bb185 Mon Sep 17 00:00:00 2001
From: Benjamin Gaignard
Date: Thu, 4 Oct 2012 17:13:20 -0700
Subject: genalloc: make it possible to use a custom allocation algorithm

Permit use of another algorithm than the default first-fit one. For
example a custom algorithm could be used to manage alignment
requirements.
As I can't predict all the possible requirements/needs for all
allocation use cases, I add a "free" field 'void *data' to pass any
needed information to the allocation function. For example 'data'
could be used to handle a structure where you store the alignment, the
expected memory bank, the requester device, or any information that
could influence the allocation algorithm.

A usage example may look like this:

	struct my_pool_constraints {
		int align;
		int bank;
		...
	};

	unsigned long my_custom_algo(unsigned long *map, unsigned long size,
			unsigned long start, unsigned int nr, void *data)
	{
		struct my_pool_constraints *constraints = data;
		...
		deal with allocation constraints
		...
		return the index in bitmap where to perform the allocation
	}

	void create_my_pool()
	{
		struct my_pool_constraints c;
		struct gen_pool *pool = gen_pool_create(...);
		gen_pool_add(pool, ...);
		gen_pool_set_algo(pool, my_custom_algo, &c);
	}

Add of best-fit algorithm function: most of the time best-fit is
slower than first-fit but memory fragmentation is lower. The random
buffer allocation/free tests don't show any arithmetic relation
between the allocation time and fragmentation but the best-fit
algorithm is sometimes able to perform the allocation when the
first-fit can't.

This new algorithm helps to remove static allocations on ESRAM, a
small but fast on-chip RAM of a few KB, used for high-performance use
cases like DMA linked lists, graphic accelerators, encoders/decoders.
On the Ux500 (in the ARM tree) we have defined 5 ESRAM banks of 128 KB
each and use of static allocations becomes unmaintainable:

cd arch/arm/mach-ux500 && grep -r ESRAM .
./include/mach/db8500-regs.h:/* Base address and bank offsets for ESRAM */
./include/mach/db8500-regs.h:#define U8500_ESRAM_BASE 0x40000000
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK_SIZE 0x00020000
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK0 U8500_ESRAM_BASE
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK1 (U8500_ESRAM_BASE + U8500_ESRAM_BANK_SIZE)
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
./include/mach/db8500-regs.h:#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
./include/mach/db8500-regs.h:#define U8500_ESRAM_DMA_LCPA_OFFSET 0x10000
./include/mach/db8500-regs.h:#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK0 + U8500_ESRAM_DMA_LCPA_OFFSET)
./include/mach/db8500-regs.h:#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4

I want to use genalloc to do dynamic allocations but I need to be able
to fine tune the allocation algorithm. In my case the best-fit
algorithm gives better results than first-fit, but it will not be true
for every use case.
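For completeness, switching an existing pool to the best-fit helper added by this patch is a one-liner; a hedged sketch (the pool parameters are placeholders, not values from the patch):

	#include <linux/genalloc.h>

	static struct gen_pool *create_esram_pool(void)
	{
		/* 256-byte allocation granularity (order 8), no NUMA affinity */
		struct gen_pool *pool = gen_pool_create(8, -1);

		if (pool)
			/* gen_pool_best_fit() needs no private data, hence NULL */
			gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
		return pool;
	}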
Signed-off-by: Benjamin Gaignard Cc: Huang Ying Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/genalloc.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/genalloc.c b/lib/genalloc.c index 6bc04aab6ec7..ca208a92628c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; + pool->algo = gen_pool_first_fit; + pool->data = NULL; } return pool; } @@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy); * @size: number of bytes to allocate from the pool * * Allocate the requested number of bytes from the specified pool. - * Uses a first-fit algorithm. Can not be used in NMI handler on - * architectures without NMI-safe cmpxchg implementation. + * Uses the pool allocation function (with first-fit algorithm by default). + * Can not be used in NMI handler on architectures without + * NMI-safe cmpxchg implementation. */ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) { @@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) end_bit = (chunk->end_addr - chunk->start_addr) >> order; retry: - start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, - start_bit, nbits, 0); + start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, + pool->data); if (start_bit >= end_bit) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); @@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool) return size; } EXPORT_SYMBOL_GPL(gen_pool_size); + +/** + * gen_pool_set_algo - set the allocation algorithm + * @pool: pool to change allocation algorithm + * @algo: custom algorithm function + * @data: additional data used by @algo + * + * Call @algo for each memory allocation in the pool. + * If @algo is NULL use gen_pool_first_fit as default + * memory allocation function. + */ +void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data) +{ + rcu_read_lock(); + + pool->algo = algo; + if (!pool->algo) + pool->algo = gen_pool_first_fit; + + pool->data = data; + + rcu_read_unlock(); +} +EXPORT_SYMBOL(gen_pool_set_algo); + +/** + * gen_pool_first_fit - find the first available region + * of memory matching the size requirement (no alignment constraint) + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @data: additional data - unused + */ +unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, void *data) +{ + return bitmap_find_next_zero_area(map, size, start, nr, 0); +} +EXPORT_SYMBOL(gen_pool_first_fit); + +/** + * gen_pool_best_fit - find the best fitting region of memory + * macthing the size requirement (no alignment constraint) + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @data: additional data - unused + * + * Iterate over the bitmap to find the smallest free region + * which we can allocate the memory. 
+ */ +unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, void *data) +{ + unsigned long start_bit = size; + unsigned long len = size + 1; + unsigned long index; + + index = bitmap_find_next_zero_area(map, size, start, nr, 0); + + while (index < size) { + int next_bit = find_next_bit(map, size, index + nr); + if ((next_bit - index) < len) { + len = next_bit - index; + start_bit = index; + if (len == nr) + return start_bit; + } + index = bitmap_find_next_zero_area(map, size, + next_bit + 1, nr, 0); + } + + return start_bit; +} +EXPORT_SYMBOL(gen_pool_best_fit); -- cgit From 214f766ea0909e743122966c4617b3a112e405d7 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Thu, 4 Oct 2012 17:13:22 -0700 Subject: lib/spinlock_debug: avoid livelock in do_raw_spin_lock() The logic in do_raw_spin_lock() attempts to acquire a spinlock by invoking arch_spin_trylock() in a loop with a delay between each attempt. Now consider the following situation in a 2 CPU system: 1. CPU-0 continually acquires and releases a spinlock in a tight loop; it stays in this loop until some condition X is satisfied. X can only be satisfied by another CPU. 2. CPU-1 tries to acquire the same spinlock, in an attempt to satisfy the aforementioned condition X. However, it never sees the unlocked value of the lock because the debug spinlock code uses trylock instead of just lock; it checks at all the wrong moments - whenever CPU-0 has locked the lock. Now in the absence of debug spinlocks, the architecture specific spinlock code can correctly allow CPU-1 to wait in a "queue" (e.g., ticket spinlocks), ensuring that it acquires the lock at some point. However, with the debug spinlock code, livelock can easily occur due to the use of try_lock, which obviously cannot put the CPU in that "queue". This queueing mechanism is implemented in both x86 and ARM spinlock code. Note that the situation mentioned above is not hypothetical. A real problem was encountered where CPU-0 was running hrtimer_cancel with interrupts disabled, and CPU-1 was attempting to run the hrtimer that CPU-0 was trying to cancel. Address this by actually attempting arch_spin_lock once it is suspected that there is a spinlock lockup. If we're in a situation that is described above, the arch_spin_lock should succeed; otherwise other timeout mechanisms (e.g., watchdog) should alert the system of a lockup. Therefore, if there is a genuine system problem and the spinlock can't be acquired, the end result (irrespective of this change being present) is the same. If there is a livelock caused by the debug code, this change will allow the lock to be acquired, depending on the implementation of the lower level arch specific spinlock code. 
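To see why the blocking fallback helps on ticket-lock architectures, here is a hedged userspace sketch (C11 atomics; all names are the editor's, not the kernel's) of the difference between a queued lock and a trylock poll: trylock can only succeed if the lock happens to be completely free at the instant it samples it, while lock takes a ticket and is guaranteed to be served in FIFO order, which is exactly the property the patch recovers by calling arch_spin_lock() after the trylock loop gives up.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <assert.h>

	struct ticket_lock {
		atomic_uint next;	/* next ticket to hand out */
		atomic_uint owner;	/* ticket currently being served */
	};

	static void ticket_lock_acquire(struct ticket_lock *l)
	{
		/* take a ticket: we now have a reserved place in the queue */
		unsigned int me = atomic_fetch_add(&l->next, 1);

		while (atomic_load(&l->owner) != me)
			;	/* spin: guaranteed to be served eventually */
	}

	static bool ticket_lock_try(struct ticket_lock *l)
	{
		unsigned int o = atomic_load(&l->owner);
		unsigned int expected = o;

		/* succeeds only if nobody holds *or waits for* the lock at
		 * this instant; a waiter polling this in a loop never joins
		 * the queue, which is the livelock described above */
		return atomic_compare_exchange_strong(&l->next, &expected, o + 1);
	}

	static void ticket_lock_release(struct ticket_lock *l)
	{
		atomic_fetch_add(&l->owner, 1);
	}

	int main(void)
	{
		struct ticket_lock l = { 0, 0 };

		assert(ticket_lock_try(&l));	/* free: trylock wins ticket 0 */
		assert(!ticket_lock_try(&l));	/* held: trylock cannot queue */
		ticket_lock_release(&l);
		ticket_lock_acquire(&l);	/* blocking path takes a ticket */
		ticket_lock_release(&l);
		return 0;
	}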
[akpm@linux-foundation.org: tweak comment] Signed-off-by: Vikram Mulukutla Cc: Thomas Gleixner Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/spinlock_debug.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'lib') diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index eb10578ae055..0374a596cffa 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; - int print_once = 1; - for (;;) { - for (i = 0; i < loops; i++) { - if (arch_spin_trylock(&lock->raw_lock)) - return; - __delay(1); - } - /* lockup suspected: */ - if (print_once) { - print_once = 0; - spin_dump(lock, "lockup suspected"); + for (i = 0; i < loops; i++) { + if (arch_spin_trylock(&lock->raw_lock)) + return; + __delay(1); + } + /* lockup suspected: */ + spin_dump(lock, "lockup suspected"); #ifdef CONFIG_SMP - trigger_all_cpu_backtrace(); + trigger_all_cpu_backtrace(); #endif - } - } + + /* + * The trylock above was causing a livelock. Give the lower level arch + * specific lock code a chance to acquire the lock. We have already + * printed a warning/backtrace at this point. The non-debug arch + * specific code might actually succeed in acquiring the lock. If it is + * not successful, the end-result is the same - there is no forward + * progress. + */ + arch_spin_lock(&lock->raw_lock); } void do_raw_spin_lock(raw_spinlock_t *lock) -- cgit From da99075c1d368315e1508b6143226c0d27b621e0 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 4 Oct 2012 17:13:24 -0700 Subject: lib/vsprintf.c: improve standard conformance of sscanf() Xen's pciback points out a couple of deficiencies with vsscanf()'s standard conformance: - Trailing character matching cannot be checked by the caller: With a format string of "(%x:%x.%x) %n" absence of the closing parenthesis cannot be checked, as input of "(00:00.0)" doesn't cause the %n to be evaluated (because of the code not skipping white space before the trailing %n). - The parameter corresponding to a trailing %n could get filled even if there was a matching error: With a format string of "(%x:%x.%x)%n", input of "(00:00.0]" would still fill the respective variable pointed to (and hence again make the mismatch non-detectable by the caller). This patch aims at fixing those, but leaves other non-conforming aspects of it untouched, among them these possibly relevant ones: - improper handling of the assignment suppression character '*' (blindly discarding all succeeding non-white space from the format and input strings), - not honoring conversion specifiers for %n, - not recognizing the C99 conversion specifier 't' (recognized by vsprintf()). Signed-off-by: Jan Beulich Cc: Konrad Rzeszutek Wilk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 9287e2549936..39c99fea7c03 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) s16 field_width; bool is_sign; - while (*fmt && *str) { + while (*fmt) { /* skip any white space in format */ /* white space in format matchs any amount of * white space, including none, in the input. 
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		 * advance both strings to next white space
 		 */
 		if (*fmt == '*') {
+			if (!*str)
+				break;
 			while (!isspace(*fmt) && *fmt != '%' && *fmt)
 				fmt++;
 			while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 			}
 		}

-		if (!*fmt || !*str)
+		if (!*fmt)
+			break;
+
+		if (*fmt == 'n') {
+			/* return number of characters read so far */
+			*va_arg(args, int *) = str - buf;
+			++fmt;
+			continue;
+		}
+
+		if (!*str)
 			break;

 		base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 				num++;
 			}
 			continue;
-		case 'n':
-			/* return number of characters read so far */
-		{
-			int *i = (int *)va_arg(args, int*);
-			*i = str - buf;
-		}
-			continue;
 		case 'o':
 			base = 8;
 			break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		str = next;
 	}

-	/*
-	 * Now we've come all the way through so either the input string or the
-	 * format ended. In the former case, there can be a %n at the current
-	 * position in the format that needs to be filled.
-	 */
-	if (*fmt == '%' && *(fmt + 1) == 'n') {
-		int *p = (int *)va_arg(args, int *);
-		*p = str - buf;
-	}
-
 	return num;
 }
 EXPORT_SYMBOL(vsscanf);
--
cgit

From 17d7aac9a55b164874f3d4b7b4f5af87ab457b84 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Thu, 4 Oct 2012 17:13:27 -0700
Subject: lib/plist.c: make plist test announcements KERN_DEBUG

They show up in dmesg

  [    4.041094] start plist test
  [    4.045804] end plist test

without a lot of meaning so hide them behind debug loglevel.

Signed-off-by: Borislav Petkov
Cc: Lai Jiangshan
Acked-by: Steven Rostedt
Cc: Paul Gortmaker
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/plist.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'lib')

diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
 	int nr_expect = 0, i, loop;
 	unsigned int r = local_clock();

-	printk(KERN_INFO "start plist test\n");
+	pr_debug("start plist test\n");
 	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
 		plist_test_check(nr_expect);
 	}

-	printk(KERN_INFO "end plist test\n");
+	pr_debug("end plist test\n");
 	return 0;
 }
--
cgit

From 8290e2d2dcbf0d379d4b1379e17916515ee20a39 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Thu, 4 Oct 2012 17:13:28 -0700
Subject: scatterlist: atomic sg_mapping_iter() no longer needs disabled IRQs

SG mapping iterator w/ SG_MITER_ATOMIC set required IRQ disabled
because it originally used KM_BIO_SRC_IRQ to allow use from IRQ
handlers. kmap_atomic() has long been updated to handle stacking
atomic mapping requests on per-cpu basis and only requires not
sleeping while mapped.

Update sg_mapping_iter such that atomic iterators only require
disabling preemption instead of disabling IRQ.

While at it, convert the weird @ARG@ notations to @ARG in the comment
of sg_miter_start().
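As an illustration of the relaxed requirement, a hedged sketch of an SG_MITER_ATOMIC user after this change (function and buffer names are invented for this example): the caller only needs a non-sleeping context between sg_miter_next() and sg_miter_stop(); no local_irq_save() is required, since kmap_atomic() inside the iterator disables preemption itself.

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* copy the contents of an sg list into a linear buffer; may be
	 * called from any context that cannot sleep, IRQs may stay enabled */
	static size_t sg_copy_to_linear(struct scatterlist *sgl, unsigned int nents,
					void *dst, size_t len)
	{
		struct sg_mapping_iter miter;
		size_t copied = 0;

		sg_miter_start(&miter, sgl, nents,
			       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
		while (copied < len && sg_miter_next(&miter)) {
			size_t n = min(miter.length, len - copied);

			memcpy(dst + copied, miter.addr, n);
			copied += n;
		}
		sg_miter_stop(&miter);
		return copied;
	}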
Signed-off-by: Tejun Heo Cc: Maxim Levitsky Cc: Alex Dubov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/scatterlist.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/scatterlist.c b/lib/scatterlist.c index fadae774a20c..e76d85cf3175 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -404,14 +404,13 @@ EXPORT_SYMBOL(sg_miter_start); * @miter: sg mapping iter to proceed * * Description: - * Proceeds @miter@ to the next mapping. @miter@ should have been - * started using sg_miter_start(). On successful return, - * @miter@->page, @miter@->addr and @miter@->length point to the - * current mapping. + * Proceeds @miter to the next mapping. @miter should have been started + * using sg_miter_start(). On successful return, @miter->page, + * @miter->addr and @miter->length point to the current mapping. * * Context: - * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till - * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. + * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled + * till @miter is stopped. May sleep if !SG_MITER_ATOMIC. * * Returns: * true if @miter contains the next mapping. false if end of sg @@ -465,7 +464,8 @@ EXPORT_SYMBOL(sg_miter_next); * resources (kmap) need to be released during iteration. * * Context: - * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care + * otherwise. */ void sg_miter_stop(struct sg_mapping_iter *miter) { @@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { - WARN_ON(!irqs_disabled()); + WARN_ON_ONCE(preemptible()); kunmap_atomic(miter->addr); } else kunmap(miter->page); -- cgit From 33e2a4227ddff7c18921ac175fae3ab0e3ff8a76 Mon Sep 17 00:00:00 2001 From: Hein Tibosch Date: Thu, 4 Oct 2012 17:16:58 -0700 Subject: lib/decompress.c add __init to decompress_method and data Fix the warning: WARNING: vmlinux.o(.text+0x14cfd8): Section mismatch in reference from the variable compressed_formats to the function .init.text:gunzip() The function compressed_formats() references the function __init gunzip(), etc. Within decompress.c, compressed_formats[] needs an __initdata annotation, because some of its data members refer to functions which will be unloaded after init. Consequently, its user decompress_method() will get the __init prefix. Signed-off-by: Hein Tibosch Cc: Albin Tonnerre Cc: Phillip Lougher Cc: "H.
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/decompress.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/decompress.c b/lib/decompress.c index 3d766b7f60ab..31a804277282 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -14,6 +14,7 @@ #include #include +#include #ifndef CONFIG_DECOMPRESS_GZIP # define gunzip NULL @@ -31,11 +32,13 @@ # define unlzo NULL #endif -static const struct compress_format { +struct compress_format { unsigned char magic[2]; const char *name; decompress_fn decompressor; -} compressed_formats[] = { +}; + +static const struct compress_format compressed_formats[] __initdata = { { {037, 0213}, "gzip", gunzip }, { {037, 0236}, "gzip", gunzip }, { {0x42, 0x5a}, "bzip2", bunzip2 }, @@ -45,7 +48,7 @@ static const struct compress_format { { {0, 0}, NULL, NULL } }; -decompress_fn decompress_method(const unsigned char *inbuf, int len, +decompress_fn __init decompress_method(const unsigned char *inbuf, int len, const char **name) { const struct compress_format *cf; -- cgit From b69ec42b1b194cc88f04b3fbcda8d3f93182d6c3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:11 -0700 Subject: Kconfig: clean up the long arch list for the DEBUG_KMEMLEAK config option Introduce HAVE_DEBUG_KMEMLEAK config option and select it in corresponding architecture Kconfig files. DEBUG_KMEMLEAK now only depends on HAVE_DEBUG_KMEMLEAK. Signed-off-by: Catalin Marinas Cc: Russell King Cc: Michal Simek Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: "David S. Miller" Cc: Chris Metcalf Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7fba3a98967f..736db3990506 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -450,12 +450,12 @@ config SLUB_STATS out which slabs are relevant to a particular load. Try running: slabinfo -DA +config HAVE_DEBUG_KMEMLEAK + bool + config DEBUG_KMEMLEAK bool "Kernel memory leak detector" - depends on DEBUG_KERNEL && EXPERIMENTAL && \ - (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || \ - MICROBLAZE || TILE || ARM64) - + depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK select DEBUG_FS select STACKTRACE if STACKTRACE_SUPPORT select KALLSYMS -- cgit From 9b2a60c484715e2d2f07d8192fd9f18435cbc77c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:13 -0700 Subject: Kconfig: clean up the long arch list for the DEBUG_BUGVERBOSE config option Introduce HAVE_DEBUG_BUGVERBOSE config option and select it in corresponding architecture Kconfig files. Architectures that already select GENERIC_BUG don't need to select HAVE_DEBUG_BUGVERBOSE. Signed-off-by: Catalin Marinas Acked-by: Geert Uytterhoeven Cc: David Howells Cc: Hirokazu Takata Cc: Paul Mundt Cc: "David S. Miller" Cc: Chris Metcalf Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 736db3990506..b7281e4d1473 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -751,12 +751,12 @@ config DEBUG_HIGHMEM This options enables addition error checking for high memory systems. Disable for production systems. 
+config HAVE_DEBUG_BUGVERBOSE + bool + config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT - depends on BUG - depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ - FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || \ - TILE || ARM64 + depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE) default y help Say Y here to make BUG() panics output the file name and line number -- cgit From 4c199a93a2d36b277a9fd209a0f2793f8460a215 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:32 -0700 Subject: rbtree: empty nodes have no color Empty nodes have no color. We can make use of this property to simplify the code emitted by the RB_EMPTY_NODE and RB_CLEAR_NODE macros. Also, we can get rid of the rb_init_node function which had been introduced by commit 88d19cf37952 ("timers: Add rb_init_node() to allow for stack allocated rb nodes") to avoid some issue with the empty node's color not being initialized. I'm not sure what the RB_EMPTY_NODE checks in rb_prev() / rb_next() are doing there, though. axboe introduced them in commit 10fd48f2376d ("rbtree: fixed reversed RB_EMPTY_NODE and rb_next/prev"). The way I see it, the 'empty node' abstraction is only used by rbtree users to flag nodes that they haven't inserted in any rbtree, so asking the predecessor or successor of such nodes doesn't make any sense. One final rb_init_node() caller was recently added in sysctl code to implement faster sysctl name lookups. This code doesn't make use of RB_EMPTY_NODE at all, and from what I could see it only called rb_init_node() under the mistaken assumption that such initialization was required before node insertion. [sfr@canb.auug.org.au: fix net/ceph/osd_client.c build] Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Cc: John Stultz Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index d4175565dc2c..fe43c8c5f527 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -387,7 +387,7 @@ struct rb_node *rb_next(const struct rb_node *node) { struct rb_node *parent; - if (rb_parent(node) == node) + if (RB_EMPTY_NODE(node)) return NULL; /* If we have a right-hand child, go down and then left as far @@ -416,7 +416,7 @@ struct rb_node *rb_prev(const struct rb_node *node) { struct rb_node *parent; - if (rb_parent(node) == node) + if (RB_EMPTY_NODE(node)) return NULL; /* If we have a left-hand child, go down and then right as far -- cgit From bf7ad8eeab995710c766df49c9c69a8592ca0216 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:37 -0700 Subject: rbtree: move some implementation details from rbtree.h to rbtree.c rbtree users must use the documented APIs to manipulate the tree structure. Low-level helpers to manipulate node colors and parenthood are not part of that API, so move them to lib/rbtree.c [dwmw2@infradead.org: fix jffs2 build issue due to renamed __rb_parent_color field] Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. 
Biederman" Signed-off-by: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index fe43c8c5f527..ccada9abe6f5 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -23,6 +23,24 @@ #include #include +#define RB_RED 0 +#define RB_BLACK 1 + +#define rb_color(r) ((r)->__rb_parent_color & 1) +#define rb_is_red(r) (!rb_color(r)) +#define rb_is_black(r) rb_color(r) +#define rb_set_red(r) do { (r)->__rb_parent_color &= ~1; } while (0) +#define rb_set_black(r) do { (r)->__rb_parent_color |= 1; } while (0) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; +} +static inline void rb_set_color(struct rb_node *rb, int color) +{ + rb->__rb_parent_color = (rb->__rb_parent_color & ~1) | color; +} + static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) { struct rb_node *right = node->rb_right; @@ -255,7 +273,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root) rb_set_parent(old->rb_right, node); } - node->rb_parent_color = old->rb_parent_color; + node->__rb_parent_color = old->__rb_parent_color; node->rb_left = old->rb_left; rb_set_parent(old->rb_left, node); -- cgit From 910a742d4ba863848c7283d69c21bfa779d3b9a8 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:39 -0700 Subject: rbtree: performance and correctness test This small module helps measure the performance of rbtree insert and erase. Additionally, we run a few correctness tests to check that the rbtrees have all desired properties: - contains the right number of nodes in the order desired, - never two consecutive red nodes on any path, - all paths to leaf nodes have the same number of black nodes, - root node is black [akpm@linux-foundation.org: fix printk warning: sparc64 cycles_t is unsigned long] Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 7 +++ lib/Makefile | 2 + lib/rbtree_test.c | 135 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+) create mode 100644 lib/rbtree_test.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b7281e4d1473..a4e5d93b0f41 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1282,6 +1282,13 @@ config LATENCYTOP source mm/Kconfig.debug source kernel/trace/Kconfig +config RBTREE_TEST + tristate "Red-Black tree test" + depends on m && DEBUG_KERNEL + help + A benchmark measuring the performance of the rbtree library. + Also includes rbtree invariant checks. 
+ config PROVIDE_OHCI1394_DMA_INIT bool "Remote debugging over FireWire early on boot" depends on PCI && X86 diff --git a/lib/Makefile b/lib/Makefile index 42d283edc4d3..f49445d26b65 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -140,6 +140,8 @@ $(foreach file, $(libfdt_files), \ $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt)) lib-$(CONFIG_LIBFDT) += $(libfdt_files) +obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c new file mode 100644 index 000000000000..19dfca9ff7d7 --- /dev/null +++ b/lib/rbtree_test.c @@ -0,0 +1,135 @@ +#include <linux/module.h> +#include <linux/rbtree.h> +#include <linux/random.h> +#include <asm/timex.h> + +#define NODES 100 +#define PERF_LOOPS 100000 +#define CHECK_LOOPS 100 + +struct test_node { + struct rb_node rb; + u32 key; +}; + +static struct rb_root root = RB_ROOT; +static struct test_node nodes[NODES]; + +static struct rnd_state rnd; + +static void insert(struct test_node *node, struct rb_root *root) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + + while (*new) { + parent = *new; + if (node->key < rb_entry(parent, struct test_node, rb)->key) + new = &parent->rb_left; + else + new = &parent->rb_right; + } + + rb_link_node(&node->rb, parent, new); + rb_insert_color(&node->rb, root); +} + +static inline void erase(struct test_node *node, struct rb_root *root) +{ + rb_erase(&node->rb, root); +} + +static void init(void) +{ + int i; + for (i = 0; i < NODES; i++) + nodes[i].key = prandom32(&rnd); +} + +static bool is_red(struct rb_node *rb) +{ + return !(rb->__rb_parent_color & 1); +} + +static int black_path_count(struct rb_node *rb) +{ + int count; + for (count = 0; rb; rb = rb_parent(rb)) + count += !is_red(rb); + return count; +} + +static void check(int nr_nodes) +{ + struct rb_node *rb; + int count = 0; + int blacks; + u32 prev_key = 0; + + for (rb = rb_first(&root); rb; rb = rb_next(rb)) { + struct test_node *node = rb_entry(rb, struct test_node, rb); + WARN_ON_ONCE(node->key < prev_key); + WARN_ON_ONCE(is_red(rb) && + (!rb_parent(rb) || is_red(rb_parent(rb)))); + if (!count) + blacks = black_path_count(rb); + else + WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) && + blacks != black_path_count(rb)); + prev_key = node->key; + count++; + } + WARN_ON_ONCE(count != nr_nodes); +} + +static int rbtree_test_init(void) +{ + int i, j; + cycles_t time1, time2, time; + + printk(KERN_ALERT "rbtree testing"); + + prandom32_seed(&rnd, 3141592653589793238); + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + insert(nodes + j, &root); + for (j = 0; j < NODES; j++) + erase(nodes + j, &root); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + printk(" -> %llu cycles\n", (unsigned long long)time); + + for (i = 0; i < CHECK_LOOPS; i++) { + init(); + for (j = 0; j < NODES; j++) { + check(j); + insert(nodes + j, &root); + } + for (j = 0; j < NODES; j++) { + check(NODES - j); + erase(nodes + j, &root); + } + check(0); + } + + return -EAGAIN; /* Fail will directly unload the module */ +} + +static void rbtree_test_exit(void) +{ + printk(KERN_ALERT "test exit\n"); +} + +module_init(rbtree_test_init) +module_exit(rbtree_test_exit) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michel Lespinasse"); +MODULE_DESCRIPTION("Red Black Tree test"); -- cgit From 1f0528653e41ec230c60f5738820e8a544731399 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:42 -0700 Subject: rbtree: break out of rb_insert_color loop
after tree rotation It is a well known property of rbtrees that insertion never requires more than two tree rotations. In our implementation, after one loop iteration identified one or two necessary tree rotations, we would iterate and look for more. However at that point the node's parent would always be black, which would cause us to exit the loop. We can make the code flow more obvious by just adding a break statement after the tree rotations, where we know we are done. Additionally, in the cases where two tree rotations are necessary, we don't have to update the 'node' pointer as it wouldn't be used until the next loop iteration, which we now avoid due to this break statement. Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index ccada9abe6f5..12abb8abf442 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -109,18 +109,15 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) } } - if (parent->rb_right == node) - { - register struct rb_node *tmp; + if (parent->rb_right == node) { __rb_rotate_left(parent, root); - tmp = parent; parent = node; - node = tmp; } rb_set_black(parent); rb_set_red(gparent); __rb_rotate_right(gparent, root); + break; } else { { register struct rb_node *uncle = gparent->rb_left; @@ -134,18 +131,15 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) } } - if (parent->rb_left == node) - { - register struct rb_node *tmp; + if (parent->rb_left == node) { __rb_rotate_right(parent, root); - tmp = parent; parent = node; - node = tmp; } rb_set_black(parent); rb_set_red(gparent); __rb_rotate_left(gparent, root); + break; } } -- cgit From 6d58452dc066db61acdff7b84671db1b11a3de1c Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:44 -0700 Subject: rbtree: adjust root color in rb_insert_color() only when necessary The root node of an rbtree must always be black. However, rb_insert_color() only needs to maintain this invariant when it has been broken - that is, when it exits the loop due to the current (red) node being the root. In all other cases (exiting after tree rotations, or exiting due to an existing black parent) the invariant is already satisfied, so there is no need to adjust the root node color. Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 12abb8abf442..d0be5fcaafe8 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -91,8 +91,21 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) { struct rb_node *parent, *gparent; - while ((parent = rb_parent(node)) && rb_is_red(parent)) - { + while (true) { + /* + * Loop invariant: node is red + * + * If there is a black parent, we are done. + * Otherwise, take some corrective action as we don't + * want a red root or two consecutive red nodes. 
+ */ + parent = rb_parent(node); + if (!parent) { + rb_set_black(node); + break; + } else if (rb_is_black(parent)) + break; + gparent = rb_parent(parent); if (parent == gparent->rb_left) @@ -142,8 +155,6 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) break; } } - - rb_set_black(root->rb_node); } EXPORT_SYMBOL(rb_insert_color); -- cgit From 5bc9188aa207dafd47eab57df7c4fe5b3d3f636a Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:47 -0700 Subject: rbtree: low level optimizations in rb_insert_color() - Use the newly introduced rb_set_parent_color() function to flip the color of nodes whose parent is already known. - Optimize rb_parent() when the node is known to be red - there is no need to mask out the color in that case. - Flipping gparent's color to red requires us to fetch its rb_parent_color field, so we can reuse it as the parent value for the next loop iteration. - Do not use __rb_rotate_left() and __rb_rotate_right() to handle tree rotations: we already have pointers to all relevant nodes, and know their colors (either because we want to adjust it, or because we've tested it, or we can deduce it as black due to the node proximity to a known red node). So we can generate more efficient code by making use of the node pointers we already have, and setting both the parent and color attributes for nodes all at once. Also in Case 2, some node attributes don't have to be set because we know another tree rotation (Case 3) will always follow and override them. Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 131 insertions(+), 35 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index d0be5fcaafe8..41cf19b2fe51 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -23,6 +23,25 @@ #include #include +/* + * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree + * + * 1) A node is either red or black + * 2) The root is black + * 3) All leaves (NULL) are black + * 4) Both children of every red node are black + * 5) Every simple path from root to leaves contains the same number + * of black nodes. + * + * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two + * consecutive red nodes in a path and every red node is therefore followed by + * a black. So if B is the number of black nodes on every simple path (as per + * 5), then the longest possible path due to 4 is 2B. + * + * We shall indicate color with case, where black nodes are uppercase and red + * nodes will be lowercase. 
+ */ + #define RB_RED 0 #define RB_BLACK 1 @@ -41,6 +60,17 @@ static inline void rb_set_color(struct rb_node *rb, int color) rb->__rb_parent_color = (rb->__rb_parent_color & ~1) | color; } +static inline void rb_set_parent_color(struct rb_node *rb, + struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p | color; +} + +static inline struct rb_node *rb_red_parent(struct rb_node *red) +{ + return (struct rb_node *)red->__rb_parent_color; +} + static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) { struct rb_node *right = node->rb_right; @@ -87,9 +117,30 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) rb_set_parent(node, left); } +/* + * Helper function for rotations: + * - old's parent and color get assigned to new + * - old gets assigned new as a parent and 'color' as a color. + */ +static inline void +__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, + struct rb_root *root, int color) +{ + struct rb_node *parent = rb_parent(old); + new->__rb_parent_color = old->__rb_parent_color; + rb_set_parent_color(old, new, color); + if (parent) { + if (parent->rb_left == old) + parent->rb_left = new; + else + parent->rb_right = new; + } else + root->rb_node = new; +} + void rb_insert_color(struct rb_node *node, struct rb_root *root) { - struct rb_node *parent, *gparent; + struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; while (true) { /* @@ -99,59 +150,104 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) * Otherwise, take some corrective action as we don't * want a red root or two consecutive red nodes. */ - parent = rb_parent(node); if (!parent) { - rb_set_black(node); + rb_set_parent_color(node, NULL, RB_BLACK); break; } else if (rb_is_black(parent)) break; - gparent = rb_parent(parent); - - if (parent == gparent->rb_left) - { - { - register struct rb_node *uncle = gparent->rb_right; - if (uncle && rb_is_red(uncle)) - { - rb_set_black(uncle); - rb_set_black(parent); - rb_set_red(gparent); - node = gparent; - continue; - } + gparent = rb_red_parent(parent); + + if (parent == gparent->rb_left) { + tmp = gparent->rb_right; + if (tmp && rb_is_red(tmp)) { + /* + * Case 1 - color flips + * + * G g + * / \ / \ + * p u --> P U + * / / + * n N + * + * However, since g's parent might be red, and + * 4) does not allow this, we need to recurse + * at g. + */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; } if (parent->rb_right == node) { - __rb_rotate_left(parent, root); + /* + * Case 2 - left rotate at parent + * + * G G + * / \ / \ + * p U --> n U + * \ / + * n p + * + * This still leaves us in violation of 4), the + * continuation into Case 3 will fix that. 
+ */ + parent->rb_right = tmp = node->rb_left; + node->rb_left = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); parent = node; } - rb_set_black(parent); - rb_set_red(gparent); - __rb_rotate_right(gparent, root); + /* + * Case 3 - right rotate at gparent + * + * G P + * / \ / \ + * p U --> n g + * / \ + * n U + */ + gparent->rb_left = tmp = parent->rb_right; + parent->rb_right = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); break; } else { - { - register struct rb_node *uncle = gparent->rb_left; - if (uncle && rb_is_red(uncle)) - { - rb_set_black(uncle); - rb_set_black(parent); - rb_set_red(gparent); - node = gparent; - continue; - } + tmp = gparent->rb_left; + if (tmp && rb_is_red(tmp)) { + /* Case 1 - color flips */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; } if (parent->rb_left == node) { - __rb_rotate_right(parent, root); + /* Case 2 - right rotate at parent */ + parent->rb_left = tmp = node->rb_right; + node->rb_right = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); parent = node; } - rb_set_black(parent); - rb_set_red(gparent); - __rb_rotate_left(gparent, root); + /* Case 3 - left rotate at gparent */ + gparent->rb_right = tmp = parent->rb_left; + parent->rb_left = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); break; } } -- cgit From d6ff1273928ebf15466a85b7e1810cd00e72998b Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:50 -0700 Subject: rbtree: adjust node color in __rb_erase_color() only when necessary In __rb_erase_color(), we were always setting a node to black after exiting the main loop. And in one case, after fixing up the tree to satisfy all rbtree invariants, we were setting the current node to root just to guarantee a loop exit, at which point the root would be set to black. However this is not necessary, as the root of an rbtree is already known to be black. The only case where the color flip is required is when we exit the loop due to the current node being red, and it's easiest to just do the flip at that point instead of doing it after the loop. [adrian.hunter@intel.com: perf tools: fix build for another rbtree.c change] Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 41cf19b2fe51..baf7c835c57c 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -259,10 +259,22 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, { struct rb_node *other; - while ((!node || rb_is_black(node)) && node != root->rb_node) - { - if (parent->rb_left == node) - { + while (true) { + /* + * Loop invariant: all leaf paths going through node have a + * black node count that is 1 lower than other leaf paths. + * + * If node is red, we can flip it to black to adjust. 
+ * If node is the root, all leaf paths go through it. + * Otherwise, we need to adjust the tree through color flips + * and tree rotations as per one of the 4 cases below. + */ + if (node && rb_is_red(node)) { + rb_set_black(node); + break; + } else if (!parent) { + break; + } else if (parent->rb_left == node) { other = parent->rb_right; if (rb_is_red(other)) { @@ -291,12 +303,9 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, rb_set_black(parent); rb_set_black(other->rb_right); __rb_rotate_left(parent, root); - node = root->rb_node; break; } - } - else - { + } else { other = parent->rb_left; if (rb_is_red(other)) { @@ -325,13 +334,10 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, rb_set_black(parent); rb_set_black(other->rb_left); __rb_rotate_right(parent, root); - node = root->rb_node; break; } } } - if (node) - rb_set_black(node); } void rb_erase(struct rb_node *node, struct rb_root *root) -- cgit From e125d1471a4f8f1bf7ea9a83deb8d23cb40bd712 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:54 -0700 Subject: rbtree: optimize case selection logic in __rb_erase_color() In __rb_erase_color(), we have to select one of 3 cases depending on the color on the 'other' node children. If both children are black, we flip a few node colors and iterate. Otherwise, we do either one or two tree rotations, depending on the color of the 'other' child opposite to 'node', and then we are done. The corresponding logic had duplicate checks for the color of the 'other' child opposite to 'node'. It was checking it first to determine if both children are black, and then to determine how many tree rotations are required. Rearrange the logic to avoid that extra check. Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 68 +++++++++++++++++++++++++++--------------------------------- 1 file changed, 30 insertions(+), 38 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index baf7c835c57c..eb823a31099c 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -283,28 +283,24 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, __rb_rotate_left(parent, root); other = parent->rb_right; } - if ((!other->rb_left || rb_is_black(other->rb_left)) && - (!other->rb_right || rb_is_black(other->rb_right))) - { - rb_set_red(other); - node = parent; - parent = rb_parent(node); - } - else - { - if (!other->rb_right || rb_is_black(other->rb_right)) - { - rb_set_black(other->rb_left); + if (!other->rb_right || rb_is_black(other->rb_right)) { + if (!other->rb_left || + rb_is_black(other->rb_left)) { rb_set_red(other); - __rb_rotate_right(other, root); - other = parent->rb_right; + node = parent; + parent = rb_parent(node); + continue; } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_right); - __rb_rotate_left(parent, root); - break; + rb_set_black(other->rb_left); + rb_set_red(other); + __rb_rotate_right(other, root); + other = parent->rb_right; } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_right); + __rb_rotate_left(parent, root); + break; } else { other = parent->rb_left; if (rb_is_red(other)) @@ -314,28 +310,24 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, __rb_rotate_right(parent, root); other = parent->rb_left; } - if ((!other->rb_left || rb_is_black(other->rb_left)) && - (!other->rb_right || rb_is_black(other->rb_right))) - { - rb_set_red(other); - node = parent; - parent = rb_parent(node); - } - else - { - if (!other->rb_left || rb_is_black(other->rb_left)) - { - rb_set_black(other->rb_right); + if (!other->rb_left || rb_is_black(other->rb_left)) { + if (!other->rb_right || + rb_is_black(other->rb_right)) { rb_set_red(other); - __rb_rotate_left(other, root); - other = parent->rb_left; + node = parent; + parent = rb_parent(node); + continue; } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_left); - __rb_rotate_right(parent, root); - break; + rb_set_black(other->rb_right); + rb_set_red(other); + __rb_rotate_left(other, root); + other = parent->rb_left; } + rb_set_color(other, rb_color(parent)); + rb_set_black(parent); + rb_set_black(other->rb_left); + __rb_rotate_right(parent, root); + break; } } } -- cgit From 6280d2356fd8ad0936a63c10dc1e6accf48d0c61 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:30:57 -0700 Subject: rbtree: low level optimizations in __rb_erase_color() In __rb_erase_color(), we often already have pointers to the nodes being rotated and/or know what their colors must be, so we can generate more efficient code than the generic __rb_rotate_left() and __rb_rotate_right() functions. Also when the current node is red or when flipping the sibling's color, the parent is already known so we can use the more efficient rb_set_parent_color() function to set the desired color. Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 208 +++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 115 insertions(+), 93 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index eb823a31099c..a38e473d8fe7 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -39,7 +39,8 @@ * 5), then the longest possible path due to 4 is 2B. * * We shall indicate color with case, where black nodes are uppercase and red - * nodes will be lowercase. + * nodes will be lowercase. Unknown color nodes shall be drawn as red within + * parentheses and have some accompanying text comment. */ #define RB_RED 0 @@ -48,17 +49,11 @@ #define rb_color(r) ((r)->__rb_parent_color & 1) #define rb_is_red(r) (!rb_color(r)) #define rb_is_black(r) rb_color(r) -#define rb_set_red(r) do { (r)->__rb_parent_color &= ~1; } while (0) -#define rb_set_black(r) do { (r)->__rb_parent_color |= 1; } while (0) static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) { rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; } -static inline void rb_set_color(struct rb_node *rb, int color) -{ - rb->__rb_parent_color = (rb->__rb_parent_color & ~1) | color; -} static inline void rb_set_parent_color(struct rb_node *rb, struct rb_node *p, int color) @@ -71,52 +66,6 @@ static inline struct rb_node *rb_red_parent(struct rb_node *red) return (struct rb_node *)red->__rb_parent_color; } -static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *right = node->rb_right; - struct rb_node *parent = rb_parent(node); - - if ((node->rb_right = right->rb_left)) - rb_set_parent(right->rb_left, node); - right->rb_left = node; - - rb_set_parent(right, parent); - - if (parent) - { - if (node == parent->rb_left) - parent->rb_left = right; - else - parent->rb_right = right; - } - else - root->rb_node = right; - rb_set_parent(node, right); -} - -static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) -{ - struct rb_node *left = node->rb_left; - struct rb_node *parent = rb_parent(node); - - if ((node->rb_left = left->rb_right)) - rb_set_parent(left->rb_right, node); - left->rb_right = node; - - rb_set_parent(left, parent); - - if (parent) - { - if (node == parent->rb_right) - parent->rb_right = left; - else - parent->rb_left = left; - } - else - root->rb_node = left; - rb_set_parent(node, left); -} - /* * Helper function for rotations: * - old's parent and color get assigned to new @@ -257,7 +206,7 @@ EXPORT_SYMBOL(rb_insert_color); static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, struct rb_root *root) { - struct rb_node *other; + struct rb_node *sibling, *tmp1, *tmp2; while (true) { /* @@ -270,63 +219,136 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, * and tree rotations as per one of the 4 cases below. 
*/ if (node && rb_is_red(node)) { - rb_set_black(node); + rb_set_parent_color(node, parent, RB_BLACK); break; } else if (!parent) { break; } else if (parent->rb_left == node) { - other = parent->rb_right; - if (rb_is_red(other)) - { - rb_set_black(other); - rb_set_red(parent); - __rb_rotate_left(parent, root); - other = parent->rb_right; + sibling = parent->rb_right; + if (rb_is_red(sibling)) { + /* + * Case 1 - left rotate at parent + * + * P S + * / \ / \ + * N s --> p Sr + * / \ / \ + * Sl Sr N Sl + */ + parent->rb_right = tmp1 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + sibling = tmp1; } - if (!other->rb_right || rb_is_black(other->rb_right)) { - if (!other->rb_left || - rb_is_black(other->rb_left)) { - rb_set_red(other); + tmp1 = sibling->rb_right; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_left; + if (!tmp2 || rb_is_black(tmp2)) { + /* + * Case 2 - sibling color flip + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N s + * / \ / \ + * Sl Sr Sl Sr + * + * This leaves us violating 5), so + * recurse at p. If p is red, the + * recursion will just flip it to black + * and exit. If coming from Case 1, + * p is known to be red. + */ + rb_set_parent_color(sibling, parent, + RB_RED); node = parent; parent = rb_parent(node); continue; } - rb_set_black(other->rb_left); - rb_set_red(other); - __rb_rotate_right(other, root); - other = parent->rb_right; + /* + * Case 3 - right rotate at sibling + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N Sl + * / \ \ + * sl Sr s + * \ + * Sr + */ + sibling->rb_left = tmp1 = tmp2->rb_right; + tmp2->rb_right = sibling; + parent->rb_right = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + tmp1 = sibling; + sibling = tmp2; } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_right); - __rb_rotate_left(parent, root); + /* + * Case 4 - left rotate at parent + color flips + * (p and sl could be either color here. 
+ * After rotation, p becomes black, s acquires + * p's color, and sl keeps its color) + * + * (p) (s) + * / \ / \ + * N S --> P Sr + * / \ / \ + * (sl) sr N (sl) + */ + parent->rb_right = tmp2 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); break; } else { - other = parent->rb_left; - if (rb_is_red(other)) - { - rb_set_black(other); - rb_set_red(parent); - __rb_rotate_right(parent, root); - other = parent->rb_left; + sibling = parent->rb_left; + if (rb_is_red(sibling)) { + /* Case 1 - right rotate at parent */ + parent->rb_left = tmp1 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + sibling = tmp1; } - if (!other->rb_left || rb_is_black(other->rb_left)) { - if (!other->rb_right || - rb_is_black(other->rb_right)) { - rb_set_red(other); + tmp1 = sibling->rb_left; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_right; + if (!tmp2 || rb_is_black(tmp2)) { + /* Case 2 - sibling color flip */ + rb_set_parent_color(sibling, parent, + RB_RED); node = parent; parent = rb_parent(node); continue; } - rb_set_black(other->rb_right); - rb_set_red(other); - __rb_rotate_left(other, root); - other = parent->rb_left; + /* Case 3 - right rotate at sibling */ + sibling->rb_right = tmp1 = tmp2->rb_left; + tmp2->rb_left = sibling; + parent->rb_left = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + tmp1 = sibling; + sibling = tmp2; } - rb_set_color(other, rb_color(parent)); - rb_set_black(parent); - rb_set_black(other->rb_left); - __rb_rotate_right(parent, root); + /* Case 4 - left rotate at parent + color flips */ + parent->rb_left = tmp2 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); break; } } -- cgit From 7ce6ff9e5de99e7b72019c7de82fb438fe1dc5a0 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:01 -0700 Subject: rbtree: coding style adjustments Set comment and indentation style to be consistent with linux coding style and the rest of the file, as suggested by Peter Zijlstra Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Acked-by: David Woodhouse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index a38e473d8fe7..08926709b4f9 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -363,8 +363,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root) child = node->rb_right; else if (!node->rb_right) child = node->rb_left; - else - { + else { struct rb_node *old = node, *left; node = node->rb_right; @@ -406,17 +405,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root) if (child) rb_set_parent(child, parent); - if (parent) - { + if (parent) { if (parent->rb_left == node) parent->rb_left = child; else parent->rb_right = child; - } - else + } else root->rb_node = child; - color: +color: if (color == RB_BLACK) __rb_erase_color(child, parent, root); } @@ -529,8 +526,10 @@ struct rb_node *rb_next(const struct rb_node *node) if (RB_EMPTY_NODE(node)) return NULL; - /* If we have a right-hand child, go down and then left as far - as we can. */ + /* + * If we have a right-hand child, go down and then left as far + * as we can. + */ if (node->rb_right) { node = node->rb_right; while (node->rb_left) @@ -538,12 +537,13 @@ struct rb_node *rb_next(const struct rb_node *node) return (struct rb_node *)node; } - /* No right-hand children. Everything down and left is - smaller than us, so any 'next' node must be in the general - direction of our parent. Go up the tree; any time the - ancestor is a right-hand child of its parent, keep going - up. First time it's a left-hand child of its parent, said - parent is our 'next' node. */ + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ while ((parent = rb_parent(node)) && node == parent->rb_right) node = parent; @@ -558,8 +558,10 @@ struct rb_node *rb_prev(const struct rb_node *node) if (RB_EMPTY_NODE(node)) return NULL; - /* If we have a left-hand child, go down and then right as far - as we can. */ + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ if (node->rb_left) { node = node->rb_left; while (node->rb_right) @@ -567,8 +569,10 @@ struct rb_node *rb_prev(const struct rb_node *node) return (struct rb_node *)node; } - /* No left-hand children. Go up till we find an ancestor which - is a right-hand child of its parent */ + /* + * No left-hand children. Go up till we find an ancestor which + * is a right-hand child of its parent. + */ while ((parent = rb_parent(node)) && node == parent->rb_left) node = parent; -- cgit From 59633abf34e2f44b8e772a2c12a92132aa7c2220 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:02 -0700 Subject: rbtree: optimize fetching of sibling node When looking to fetch a node's sibling, we went through a sequence of: - check if node is the parent's left child - if it is, then fetch the parent's right child This can be replaced with: - fetch the parent's right child as an assumed sibling - check that node is NOT the fetched child This avoids fetching the parent's left child when node is actually that child. Saves a bit on code size, though it doesn't seem to make a large difference in speed. 
Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Cc: David Woodhouse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Jens Axboe Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 08926709b4f9..61cdd0e3e538 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -107,8 +107,8 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) gparent = rb_red_parent(parent); - if (parent == gparent->rb_left) { - tmp = gparent->rb_right; + tmp = gparent->rb_right; + if (parent != tmp) { /* parent == gparent->rb_left */ if (tmp && rb_is_red(tmp)) { /* * Case 1 - color flips @@ -131,7 +131,8 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) continue; } - if (parent->rb_right == node) { + tmp = parent->rb_right; + if (node == tmp) { /* * Case 2 - left rotate at parent * @@ -151,6 +152,7 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) RB_BLACK); rb_set_parent_color(parent, node, RB_RED); parent = node; + tmp = node->rb_right; } /* @@ -162,7 +164,7 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) * / \ * n U */ - gparent->rb_left = tmp = parent->rb_right; + gparent->rb_left = tmp; /* == parent->rb_right */ parent->rb_right = gparent; if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); @@ -180,7 +182,8 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) continue; } - if (parent->rb_left == node) { + tmp = parent->rb_left; + if (node == tmp) { /* Case 2 - right rotate at parent */ parent->rb_left = tmp = node->rb_right; node->rb_right = parent; @@ -189,10 +192,11 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) RB_BLACK); rb_set_parent_color(parent, node, RB_RED); parent = node; + tmp = node->rb_left; } /* Case 3 - left rotate at gparent */ - gparent->rb_right = tmp = parent->rb_left; + gparent->rb_right = tmp; /* == parent->rb_left */ parent->rb_left = gparent; if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); @@ -223,8 +227,9 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, break; } else if (!parent) { break; - } else if (parent->rb_left == node) { - sibling = parent->rb_right; + } + sibling = parent->rb_right; + if (node != sibling) { /* node == parent->rb_left */ if (rb_is_red(sibling)) { /* * Case 1 - left rotate at parent -- cgit From 28d7530928d01638678f63c3c70113540b0e6abe Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:04 -0700 Subject: rbtree test: fix sparse warning about 64-bit constant Just a small fix to make sparse happy. 
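For context, the warning class involved can be sketched as follows (a hypothetical minimal example; the exact sparse message wording may differ): on 32-bit targets an unsuffixed decimal constant that does not fit in long takes the type long long, and sparse flags the implicit widening unless the suffix spells it out.

	/* 3141592653589793238 is far larger than LONG_MAX on 32-bit
	 * (2147483647), so without a suffix sparse warns about the
	 * constant being silently long long. */
	unsigned long long seed_warns = 3141592653589793238;
	unsigned long long seed_clean = 3141592653589793238ULL; /* explicit */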
Signed-off-by: Michel Lespinasse Reported-by: Fengguang Wu Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 19dfca9ff7d7..fd09465d82ca 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -88,7 +88,7 @@ static int rbtree_test_init(void) printk(KERN_ALERT "rbtree testing"); - prandom32_seed(&rnd, 3141592653589793238); + prandom32_seed(&rnd, 3141592653589793238ULL); init(); time1 = get_cycles(); -- cgit From 7abc704ae399fcb9c51ca200b0456f8a975a8011 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:07 -0700 Subject: rbtree: add __rb_change_child() helper function Add __rb_change_child() as an inline helper function to replace code that would otherwise be duplicated 4 times in the source. No changes to binary size or speed. Signed-off-by: Michel Lespinasse Reviewed-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 46 +++++++++++++++++----------------------------- 1 file changed, 17 insertions(+), 29 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 61cdd0e3e538..de89a614b1ba 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -66,6 +66,19 @@ static inline struct rb_node *rb_red_parent(struct rb_node *red) return (struct rb_node *)red->__rb_parent_color; } +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + parent->rb_left = new; + else + parent->rb_right = new; + } else + root->rb_node = new; +} + /* * Helper function for rotations: * - old's parent and color get assigned to new @@ -78,13 +91,7 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, struct rb_node *parent = rb_parent(old); new->__rb_parent_color = old->__rb_parent_color; rb_set_parent_color(old, new, color); - if (parent) { - if (parent->rb_left == old) - parent->rb_left = new; - else - parent->rb_right = new; - } else - root->rb_node = new; + __rb_change_child(old, new, parent, root); } void rb_insert_color(struct rb_node *node, struct rb_root *root) @@ -375,13 +382,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root) while ((left = node->rb_left) != NULL) node = left; - if (rb_parent(old)) { - if (rb_parent(old)->rb_left == old) - rb_parent(old)->rb_left = node; - else - rb_parent(old)->rb_right = node; - } else - root->rb_node = node; + __rb_change_child(old, node, rb_parent(old), root); child = node->rb_right; parent = rb_parent(node); @@ -410,13 +411,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root) if (child) rb_set_parent(child, parent); - if (parent) { - if (parent->rb_left == node) - parent->rb_left = child; - else - parent->rb_right = child; - } else - root->rb_node = child; + __rb_change_child(node, child, parent, root); color: if (color == RB_BLACK) @@ -591,14 +586,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_node *parent = rb_parent(victim); /* Set the surrounding nodes to point to the replacement */ - if (parent) { - if (victim == parent->rb_left) - parent->rb_left = new; - else - parent->rb_right = new; - } else { - root->rb_node = new; - } + __rb_change_child(victim, new, parent, root); if (victim->rb_left) 
rb_set_parent(victim->rb_left, new); if (victim->rb_right) -- cgit From 60670b8034d6e2ba860af79c9379b7788d09db73 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:10 -0700 Subject: rbtree: place easiest case first in rb_erase() In rb_erase, move the easy case (node to erase has no more than 1 child) first. I feel the code reads easier that way. Signed-off-by: Michel Lespinasse Reviewed-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index de89a614b1ba..bde1b5c5fb33 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -368,17 +368,28 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, void rb_erase(struct rb_node *node, struct rb_root *root) { - struct rb_node *child, *parent; + struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *parent; int color; - if (!node->rb_left) - child = node->rb_right; - else if (!node->rb_right) - child = node->rb_left; - else { + if (!tmp) { + case1: + /* Case 1: node to erase has no more than 1 child (easy!) */ + + parent = rb_parent(node); + color = rb_color(node); + + if (child) + rb_set_parent(child, parent); + __rb_change_child(node, child, parent, root); + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + child = tmp; + goto case1; + } else { struct rb_node *old = node, *left; - node = node->rb_right; + node = child; while ((left = node->rb_left) != NULL) node = left; @@ -402,18 +413,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root) node->__rb_parent_color = old->__rb_parent_color; node->rb_left = old->rb_left; rb_set_parent(old->rb_left, node); - - goto color; } - parent = rb_parent(node); - color = rb_color(node); - - if (child) - rb_set_parent(child, parent); - __rb_change_child(node, child, parent, root); - -color: + if (color == RB_BLACK) __rb_erase_color(child, parent, root); } -- cgit From 46b6135a7402ac23c5b25f2bd79b03bab8f98278 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:11 -0700 Subject: rbtree: handle 1-child recoloring in rb_erase() instead of rb_erase_color() An interesting observation for rb_erase() is that when a node has exactly one child, the node must be black and the child must be red. An interesting consequence is that removing such a node can be done by simply replacing it with its child and making the child black, which we can do efficiently in rb_erase(). __rb_erase_color() then only needs to handle the no-child case and can be modified accordingly.
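Distilled, the one-child splice (the subcase where the child exists) reduces to the following fragment of the new rb_erase(); this is commentary on the hunk below, not standalone code:

	/*
	 * The lone child must be red: a black child would give the NULL path
	 * on node's other side a smaller black count, violating 5). Node
	 * itself must then be black due to 4). Replacing node by its child
	 * and painting the child black therefore preserves the black count
	 * of every path, so no rebalancing pass is needed.
	 */
	parent = rb_parent(node);
	__rb_change_child(node, child, parent, root);	/* splice out node */
	rb_set_parent_color(child, parent, RB_BLACK);	/* red -> black */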
Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 105 +++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 62 insertions(+), 43 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index bde1b5c5fb33..80b092538fa9 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -2,7 +2,8 @@ Red Black Trees (C) 1999 Andrea Arcangeli (C) 2002 David Woodhouse - + (C) 2012 Michel Lespinasse + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or @@ -50,6 +51,11 @@ #define rb_is_red(r) (!rb_color(r)) #define rb_is_black(r) rb_color(r) +static inline void rb_set_black(struct rb_node *rb) +{ + rb->__rb_parent_color |= RB_BLACK; +} + static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) { rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; @@ -214,27 +220,18 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) } EXPORT_SYMBOL(rb_insert_color); -static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, - struct rb_root *root) +static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) { - struct rb_node *sibling, *tmp1, *tmp2; + struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; while (true) { /* - * Loop invariant: all leaf paths going through node have a - * black node count that is 1 lower than other leaf paths. - * - * If node is red, we can flip it to black to adjust. - * If node is the root, all leaf paths go through it. - * Otherwise, we need to adjust the tree through color flips - * and tree rotations as per one of the 4 cases below. + * Loop invariants: + * - node is black (or NULL on first iteration) + * - node is not the root (parent is not NULL) + * - All leaf paths going through parent and node have a + * black node count that is 1 lower than other leaf paths. */ - if (node && rb_is_red(node)) { - rb_set_parent_color(node, parent, RB_BLACK); - break; - } else if (!parent) { - break; - } sibling = parent->rb_right; if (node != sibling) { /* node == parent->rb_left */ if (rb_is_red(sibling)) { @@ -268,17 +265,22 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, * / \ / \ * Sl Sr Sl Sr * - * This leaves us violating 5), so - * recurse at p. If p is red, the - * recursion will just flip it to black - * and exit. If coming from Case 1, - * p is known to be red. + * This leaves us violating 5) which + * can be fixed by flipping p to black + * if it was red, or by recursing at p. + * p is red when coming from Case 1. 
*/ rb_set_parent_color(sibling, parent, RB_RED); - node = parent; - parent = rb_parent(node); - continue; + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; } /* * Case 3 - right rotate at sibling @@ -339,9 +341,15 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, /* Case 2 - sibling color flip */ rb_set_parent_color(sibling, parent, RB_RED); - node = parent; - parent = rb_parent(node); - continue; + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; } /* Case 3 - right rotate at sibling */ sibling->rb_right = tmp1 = tmp2->rb_left; @@ -369,23 +377,31 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, void rb_erase(struct rb_node *node, struct rb_root *root) { struct rb_node *child = node->rb_right, *tmp = node->rb_left; - struct rb_node *parent; - int color; + struct rb_node *parent, *rebalance; if (!tmp) { - case1: - /* Case 1: node to erase has no more than 1 child (easy!) */ + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. + */ parent = rb_parent(node); - color = rb_color(node); - - if (child) - rb_set_parent(child, parent); __rb_change_child(node, child, parent, root); + if (child) { + rb_set_parent_color(child, parent, RB_BLACK); + rebalance = NULL; + } else { + rebalance = rb_is_black(node) ? parent : NULL; + } } else if (!child) { /* Still case 1, but this time the child is node->rb_left */ - child = tmp; - goto case1; + parent = rb_parent(node); + __rb_change_child(node, tmp, parent, root); + rb_set_parent_color(tmp, parent, RB_BLACK); + rebalance = NULL; } else { struct rb_node *old = node, *left; @@ -397,26 +413,29 @@ void rb_erase(struct rb_node *node, struct rb_root *root) child = node->rb_right; parent = rb_parent(node); - color = rb_color(node); if (parent == old) { parent = node; } else { - if (child) - rb_set_parent(child, parent); parent->rb_left = child; node->rb_right = old->rb_right; rb_set_parent(old->rb_right, node); } + if (child) { + rb_set_parent_color(child, parent, RB_BLACK); + rebalance = NULL; + } else { + rebalance = rb_is_black(node) ? parent : NULL; + } node->__rb_parent_color = old->__rb_parent_color; node->rb_left = old->rb_left; rb_set_parent(old->rb_left, node); } - if (color == RB_BLACK) - __rb_erase_color(child, parent, root); + if (rebalance) + __rb_erase_color(rebalance, root); } EXPORT_SYMBOL(rb_erase); -- cgit From 4f035ad67f4633c233cb3642711d49b4efc9c82d Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:13 -0700 Subject: rbtree: low level optimizations in rb_erase() Various minor optimizations in rb_erase(): - Avoid multiple loading of node->__rb_parent_color when computing parent and color information (possibly not in close sequence, as there might be further branches in the algorithm) - In the 1-child subcase of case 1, copy the __rb_parent_color field from the erased node to the child instead of recomputing it from the desired parent and color - When searching for the erased node's successor, differentiate between cases 2 and 3 based on whether any left links were followed. This avoids a condition later down. 
- In case 3, keep a pointer to the erased node's right child so we don't have to refetch it later to adjust its parent. - In the no-child subcase of cases 2 and 3, place the rebalance assignment last so that the compiler can remove the following if (rebalance) test. Also, added some comments to illustrate cases 2 and 3. Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 98 +++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 64 insertions(+), 34 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 80b092538fa9..938061ecbe61 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -47,9 +47,14 @@ #define RB_RED 0 #define RB_BLACK 1 -#define rb_color(r) ((r)->__rb_parent_color & 1) -#define rb_is_red(r) (!rb_color(r)) -#define rb_is_black(r) rb_color(r) +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) static inline void rb_set_black(struct rb_node *rb) { @@ -378,6 +383,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root) { struct rb_node *child = node->rb_right, *tmp = node->rb_left; struct rb_node *parent, *rebalance; + unsigned long pc; if (!tmp) { /* @@ -387,51 +393,75 @@ void rb_erase(struct rb_node *node, struct rb_root *root) * and node must be black due to 4). We adjust colors locally * so as to bypass __rb_erase_color() later on. */ - - parent = rb_parent(node); + pc = node->__rb_parent_color; + parent = __rb_parent(pc); __rb_change_child(node, child, parent, root); if (child) { - rb_set_parent_color(child, parent, RB_BLACK); + child->__rb_parent_color = pc; rebalance = NULL; - } else { - rebalance = rb_is_black(node) ? parent : NULL; - } + } else + rebalance = __rb_is_black(pc) ?
parent : NULL; } else if (!child) { /* Still case 1, but this time the child is node->rb_left */ - parent = rb_parent(node); + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); __rb_change_child(node, tmp, parent, root); - rb_set_parent_color(tmp, parent, RB_BLACK); rebalance = NULL; } else { - struct rb_node *old = node, *left; - - node = child; - while ((left = node->rb_left) != NULL) - node = left; - - __rb_change_child(old, node, rb_parent(old), root); - - child = node->rb_right; - parent = rb_parent(node); - - if (parent == old) { - parent = node; + struct rb_node *successor = child, *child2; + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = child; + child2 = child->rb_right; } else { - parent->rb_left = child; - - node->rb_right = old->rb_right; - rb_set_parent(old->rb_right, node); + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + parent->rb_left = child2 = successor->rb_right; + successor->rb_right = child; + rb_set_parent(child, successor); } - if (child) { - rb_set_parent_color(child, parent, RB_BLACK); + successor->rb_left = tmp = node->rb_left; + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + if (child2) { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); rebalance = NULL; } else { - rebalance = rb_is_black(node) ? parent : NULL; + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? parent : NULL; } - node->__rb_parent_color = old->__rb_parent_color; - node->rb_left = old->rb_left; - rb_set_parent(old->rb_left, node); } if (rebalance) -- cgit From dadf93534f125b9eda486b471446a8456a603d27 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:15 -0700 Subject: rbtree: augmented rbtree test Small test to measure the performance of augmented rbtrees. 
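For context, the augmented property exercised by this test is a cached subtree maximum: each node carries a value and additionally stores the largest value found anywhere in its subtree. A minimal user-space sketch of that invariant (illustrative only, not part of the patch; the kernel equivalent is augment_recompute() in the diff below):

/* Illustrative sketch, not part of the patch: each node caches the
 * maximum of ->val over its whole subtree in ->augmented. */
struct node {
	struct node *left, *right;
	unsigned int val;	/* per-node value */
	unsigned int augmented;	/* cached max of val over this subtree */
};

/* Recompute one node's cached value from its children. The test passes
 * if n->augmented == recompute(n) holds at every node after every
 * insert and erase. */
static unsigned int recompute(const struct node *n)
{
	unsigned int max = n->val;

	if (n->left && n->left->augmented > max)
		max = n->left->augmented;
	if (n->right && n->right->augmented > max)
		max = n->right->augmented;
	return max;
}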
Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree_test.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index fd09465d82ca..66e41d4bfc39 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -10,6 +10,10 @@ struct test_node { struct rb_node rb; u32 key; + + /* following fields used for testing augmented rbtree functionality */ + u32 val; + u32 augmented; }; static struct rb_root root = RB_ROOT; @@ -20,10 +24,11 @@ static struct rnd_state rnd; static void insert(struct test_node *node, struct rb_root *root) { struct rb_node **new = &root->rb_node, *parent = NULL; + u32 key = node->key; while (*new) { parent = *new; - if (node->key < rb_entry(parent, struct test_node, rb)->key) + if (key < rb_entry(parent, struct test_node, rb)->key) new = &parent->rb_left; else new = &parent->rb_right; @@ -38,11 +43,62 @@ static inline void erase(struct test_node *node, struct rb_root *root) rb_erase(&node->rb, root); } +static inline u32 augment_recompute(struct test_node *node) +{ + u32 max = node->val, child_augmented; + if (node->rb.rb_left) { + child_augmented = rb_entry(node->rb.rb_left, struct test_node, + rb)->augmented; + if (max < child_augmented) + max = child_augmented; + } + if (node->rb.rb_right) { + child_augmented = rb_entry(node->rb.rb_right, struct test_node, + rb)->augmented; + if (max < child_augmented) + max = child_augmented; + } + return max; +} + +static void augment_callback(struct rb_node *rb, void *unused) +{ + struct test_node *node = rb_entry(rb, struct test_node, rb); + node->augmented = augment_recompute(node); +} + +static void insert_augmented(struct test_node *node, struct rb_root *root) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + u32 key = node->key; + + while (*new) { + parent = *new; + if (key < rb_entry(parent, struct test_node, rb)->key) + new = &parent->rb_left; + else + new = &parent->rb_right; + } + + rb_link_node(&node->rb, parent, new); + rb_insert_color(&node->rb, root); + rb_augment_insert(&node->rb, augment_callback, NULL); +} + +static void erase_augmented(struct test_node *node, struct rb_root *root) +{ + struct rb_node *deepest = rb_augment_erase_begin(&node->rb); + rb_erase(&node->rb, root); + rb_augment_erase_end(deepest, augment_callback, NULL); +} + static void init(void) { int i; - for (i = 0; i < NODES; i++) + for (i = 0; i < NODES; i++) { nodes[i].key = prandom32(&rnd); + nodes[i].val = prandom32(&rnd); + } } static bool is_red(struct rb_node *rb) @@ -81,6 +137,17 @@ static void check(int nr_nodes) WARN_ON_ONCE(count != nr_nodes); } +static void check_augmented(int nr_nodes) +{ + struct rb_node *rb; + + check(nr_nodes); + for (rb = rb_first(&root); rb; rb = rb_next(rb)) { + struct test_node *node = rb_entry(rb, struct test_node, rb); + WARN_ON_ONCE(node->augmented != augment_recompute(node)); + } +} + static int rbtree_test_init(void) { int i, j; @@ -119,6 +186,38 @@ static int rbtree_test_init(void) check(0); } + printk(KERN_ALERT "augmented rbtree testing"); + + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + insert_augmented(nodes + j, &root); + for (j = 0; j < NODES; j++) + erase_augmented(nodes + j, &root); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + 
printk(" -> %llu cycles\n", (unsigned long long)time); + + for (i = 0; i < CHECK_LOOPS; i++) { + init(); + for (j = 0; j < NODES; j++) { + check_augmented(j); + insert_augmented(nodes + j, &root); + } + for (j = 0; j < NODES; j++) { + check_augmented(NODES - j); + erase_augmented(nodes + j, &root); + } + check_augmented(0); + } + return -EAGAIN; /* Fail will directly unload the module */ } -- cgit From 14b94af0b251a2c80885b60538166fb7d04a642e Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:17 -0700 Subject: rbtree: faster augmented rbtree manipulation Introduce new augmented rbtree APIs that allow minimal recalculation of augmented node information. A new callback is added to the rbtree insertion and erase rebalancing functions, to be called on each tree rotation. Such rotations preserve the subtree's root augmented value, but require recalculation of the one child that was previously located at the subtree root. In the insertion case, the handcoded search phase must be updated to maintain the augmented information on insertion, and then the rbtree coloring/rebalancing algorithms keep it up to date. In the erase case, things are more complicated since it is library code that manipulates the rbtree in order to remove internal nodes. This requires a couple of additional callbacks to copy a subtree's augmented value when a new root is stitched in, and to recompute augmented values down the ancestry path when a node is removed from the tree. In order to preserve maximum speed for the non-augmented case, we provide two versions of each tree manipulation function. rb_insert_augmented() is the augmented equivalent of rb_insert_color(), and rb_erase_augmented() is the augmented equivalent of rb_erase(). Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++----- lib/rbtree_test.c | 58 ++++++++++++++++++++++++++++---------- 2 files changed, 120 insertions(+), 21 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 938061ecbe61..a37ee7954b8f 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -105,7 +105,9 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, __rb_change_child(old, new, parent, root); } -void rb_insert_color(struct rb_node *node, struct rb_root *root) +static __always_inline void +__rb_insert(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; @@ -169,6 +171,7 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) rb_set_parent_color(tmp, parent, RB_BLACK); rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); parent = node; tmp = node->rb_right; } @@ -187,6 +190,7 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); break; } else { tmp = gparent->rb_left; @@ -209,6 +213,7 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) rb_set_parent_color(tmp, parent, RB_BLACK); rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); parent = node; tmp = node->rb_left; } @@ -219,13 +224,15 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root) if (tmp) rb_set_parent_color(tmp,
gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); break; } } } -EXPORT_SYMBOL(rb_insert_color); -static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) +static __always_inline void +__rb_erase_color(struct rb_node *parent, struct rb_root *root, + const struct rb_augment_callbacks *augment) { struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; @@ -254,6 +261,7 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); + augment->rotate(parent, sibling); sibling = tmp1; } tmp1 = sibling->rb_right; @@ -305,6 +313,7 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); + augment->rotate(sibling, tmp2); tmp1 = sibling; sibling = tmp2; } @@ -327,6 +336,7 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) rb_set_parent(tmp2, parent); __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + augment->rotate(parent, sibling); break; } else { sibling = parent->rb_left; @@ -337,6 +347,7 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); + augment->rotate(parent, sibling); sibling = tmp1; } tmp1 = sibling->rb_left; @@ -363,6 +374,7 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); + augment->rotate(sibling, tmp2); tmp1 = sibling; sibling = tmp2; } @@ -374,12 +386,15 @@ static void __rb_erase_color(struct rb_node *parent, struct rb_root *root) rb_set_parent(tmp2, parent); __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + augment->rotate(parent, sibling); break; } } } -void rb_erase(struct rb_node *node, struct rb_root *root) +static __always_inline void +__rb_erase(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right, *tmp = node->rb_left; struct rb_node *parent, *rebalance; @@ -401,12 +416,14 @@ void rb_erase(struct rb_node *node, struct rb_root *root) rebalance = NULL; } else rebalance = __rb_is_black(pc) ? parent : NULL; + tmp = parent; } else if (!child) { /* Still case 1, but this time the child is node->rb_left */ tmp->__rb_parent_color = pc = node->__rb_parent_color; parent = __rb_parent(pc); __rb_change_child(node, tmp, parent, root); rebalance = NULL; + tmp = parent; } else { struct rb_node *successor = child, *child2; tmp = child->rb_left; @@ -420,8 +437,9 @@ void rb_erase(struct rb_node *node, struct rb_root *root) * \ * (c) */ - parent = child; - child2 = child->rb_right; + parent = successor; + child2 = successor->rb_right; + augment->copy(node, successor); } else { /* * Case 3: node's successor is leftmost under @@ -445,6 +463,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root) parent->rb_left = child2 = successor->rb_right; successor->rb_right = child; rb_set_parent(child, successor); + augment->copy(node, successor); + augment->propagate(parent, successor); } successor->rb_left = tmp = node->rb_left; @@ -462,13 +482,62 @@ void rb_erase(struct rb_node *node, struct rb_root *root) successor->__rb_parent_color = pc; rebalance = __rb_is_black(pc2) ? 
parent : NULL; } + tmp = successor; } + augment->propagate(tmp, NULL); if (rebalance) - __rb_erase_color(rebalance, root); + __rb_erase_color(rebalance, root, augment); +} + +/* + * Non-augmented rbtree manipulation functions. + * + * We use dummy augmented callbacks here, and have the compiler optimize them + * out of the rb_insert_color() and rb_erase() function definitions. + */ + +static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} +static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} +static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} + +static const struct rb_augment_callbacks dummy_callbacks = { + dummy_propagate, dummy_copy, dummy_rotate +}; + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + __rb_insert(node, root, dummy_rotate); +} +EXPORT_SYMBOL(rb_insert_color); + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + __rb_erase(node, root, &dummy_callbacks); } EXPORT_SYMBOL(rb_erase); +/* + * Augmented rbtree manipulation functions. + * + * This instantiates the same __always_inline functions as in the non-augmented + * case, but this time with user-defined callbacks. + */ + +void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + __rb_insert(node, root, augment_rotate); +} +EXPORT_SYMBOL(__rb_insert_augmented); + +void rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_erase(node, root, augment); +} +EXPORT_SYMBOL(rb_erase_augmented); + static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data) { struct rb_node *parent; diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 66e41d4bfc39..e28345df09bf 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -61,35 +61,65 @@ static inline u32 augment_recompute(struct test_node *node) return max; } -static void augment_callback(struct rb_node *rb, void *unused) +static void augment_propagate(struct rb_node *rb, struct rb_node *stop) { - struct test_node *node = rb_entry(rb, struct test_node, rb); - node->augmented = augment_recompute(node); + while (rb != stop) { + struct test_node *node = rb_entry(rb, struct test_node, rb); + u32 augmented = augment_recompute(node); + if (node->augmented == augmented) + break; + node->augmented = augmented; + rb = rb_parent(&node->rb); + } +} + +static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new) +{ + struct test_node *old = rb_entry(rb_old, struct test_node, rb); + struct test_node *new = rb_entry(rb_new, struct test_node, rb); + new->augmented = old->augmented; } +static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new) +{ + struct test_node *old = rb_entry(rb_old, struct test_node, rb); + struct test_node *new = rb_entry(rb_new, struct test_node, rb); + + /* Rotation doesn't change subtree's augmented value */ + new->augmented = old->augmented; + old->augmented = augment_recompute(old); +} + +static const struct rb_augment_callbacks augment_callbacks = { + augment_propagate, augment_copy, augment_rotate +}; + static void insert_augmented(struct test_node *node, struct rb_root *root) { - struct rb_node **new = &root->rb_node, *parent = NULL; + struct rb_node **new = &root->rb_node, *rb_parent = NULL; u32 key = node->key; + u32 val = node->val; + struct test_node *parent; while (*new) { - parent = *new; - if (key < rb_entry(parent, struct test_node, rb)->key) - new = 
&parent->rb_left; + rb_parent = *new; + parent = rb_entry(rb_parent, struct test_node, rb); + if (parent->augmented < val) + parent->augmented = val; + if (key < parent->key) + new = &parent->rb.rb_left; else - new = &parent->rb_right; + new = &parent->rb.rb_right; } - rb_link_node(&node->rb, parent, new); - rb_insert_color(&node->rb, root); - rb_augment_insert(&node->rb, augment_callback, NULL); + node->augmented = val; + rb_link_node(&node->rb, rb_parent, new); + rb_insert_augmented(&node->rb, root, &augment_callbacks); } static void erase_augmented(struct test_node *node, struct rb_root *root) { - struct rb_node *deepest = rb_augment_erase_begin(&node->rb); - rb_erase(&node->rb, root); - rb_augment_erase_end(deepest, augment_callback, NULL); + rb_erase_augmented(&node->rb, root, &augment_callbacks); } static void init(void) -- cgit From 9d9e6f9703bbd642f3f2f807e6aaa642a4cbcec9 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:20 -0700 Subject: rbtree: remove prior augmented rbtree implementation convert arch/x86/mm/pat_rbtree.c to the proposed augmented rbtree api and remove the old augmented rbtree implementation. Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 71 ------------------------------------------------------------ 1 file changed, 71 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index a37ee7954b8f..c0088ca345f9 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -538,77 +538,6 @@ void rb_erase_augmented(struct rb_node *node, struct rb_root *root, } EXPORT_SYMBOL(rb_erase_augmented); -static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data) -{ - struct rb_node *parent; - -up: - func(node, data); - parent = rb_parent(node); - if (!parent) - return; - - if (node == parent->rb_left && parent->rb_right) - func(parent->rb_right, data); - else if (parent->rb_left) - func(parent->rb_left, data); - - node = parent; - goto up; -} - -/* - * after inserting @node into the tree, update the tree to account for - * both the new entry and any damage done by rebalance - */ -void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data) -{ - if (node->rb_left) - node = node->rb_left; - else if (node->rb_right) - node = node->rb_right; - - rb_augment_path(node, func, data); -} -EXPORT_SYMBOL(rb_augment_insert); - -/* - * before removing the node, find the deepest node on the rebalance path - * that will still be there after @node gets removed - */ -struct rb_node *rb_augment_erase_begin(struct rb_node *node) -{ - struct rb_node *deepest; - - if (!node->rb_right && !node->rb_left) - deepest = rb_parent(node); - else if (!node->rb_right) - deepest = node->rb_left; - else if (!node->rb_left) - deepest = node->rb_right; - else { - deepest = rb_next(node); - if (deepest->rb_right) - deepest = deepest->rb_right; - else if (rb_parent(deepest) != node) - deepest = rb_parent(deepest); - } - - return deepest; -} -EXPORT_SYMBOL(rb_augment_erase_begin); - -/* - * after removal, update the tree to account for the removed entry - * and any rebalance damage. - */ -void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data) -{ - if (node) - rb_augment_path(node, func, data); -} -EXPORT_SYMBOL(rb_augment_erase_end); - /* * This function returns the first node (in sort order) of the tree. 
*/ -- cgit From 3908836aa77e3621aaf2101f2920e01d7c8460d6 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:21 -0700 Subject: rbtree: add RB_DECLARE_CALLBACKS() macro As proposed by Peter Zijlstra, this makes it easier to define the augmented rbtree callbacks. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree_test.c | 34 ++-------------------------------- 1 file changed, 2 insertions(+), 32 deletions(-) (limited to 'lib') diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index e28345df09bf..b20e99969b0f 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -61,38 +61,8 @@ static inline u32 augment_recompute(struct test_node *node) return max; } -static void augment_propagate(struct rb_node *rb, struct rb_node *stop) -{ - while (rb != stop) { - struct test_node *node = rb_entry(rb, struct test_node, rb); - u32 augmented = augment_recompute(node); - if (node->augmented == augmented) - break; - node->augmented = augmented; - rb = rb_parent(&node->rb); - } -} - -static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new) -{ - struct test_node *old = rb_entry(rb_old, struct test_node, rb); - struct test_node *new = rb_entry(rb_new, struct test_node, rb); - new->augmented = old->augmented; -} - -static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new) -{ - struct test_node *old = rb_entry(rb_old, struct test_node, rb); - struct test_node *new = rb_entry(rb_new, struct test_node, rb); - - /* Rotation doesn't change subtree's augmented value */ - new->augmented = old->augmented; - old->augmented = augment_recompute(old); -} - -static const struct rb_augment_callbacks augment_callbacks = { - augment_propagate, augment_copy, augment_rotate -}; +RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, + u32, augmented, augment_recompute) static void insert_augmented(struct test_node *node, struct rb_root *root) { -- cgit From fff3fd8a1210a165252cd7cd01206da7a90d3a06 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:23 -0700 Subject: rbtree: add prio tree and interval tree tests Patch 1 implements support for interval trees, on top of the augmented rbtree API. It also adds synthetic tests to compare the performance of interval trees vs prio trees. The short answer is that interval trees are slightly faster (~25%) on insert/erase, and much faster (~2.4 - 3x) on search. It is debatable how realistic the synthetic test is, and I have not made such measurements yet, but my impression is that interval trees would still come out faster. Patch 2 uses a preprocessor template to make the interval tree generic, and uses it as a replacement for the vma prio_tree. Patch 3 takes the other prio_tree user, kmemleak, and converts it to use a basic rbtree. We don't actually need the augmented rbtree support here because the intervals are always non-overlapping. Patch 4 removes the now-unused prio tree library. Patch 5 proposes an additional optimization to rb_erase_augmented, now providing it as an inline function so that the augmented callbacks can be inlined into it. This provides an additional 5-10% performance improvement for the interval tree insert/erase benchmark.
There is a maintenance cost as it exposes augmented rbtree users to some of the rbtree library internals; however I think this cost shouldn't be too high as I expect the augmented rbtree will always have far fewer users than the base rbtree. I should probably add a quick summary of why I think it makes sense to replace prio trees with augmented rbtree based interval trees now. One of the drivers is that we need augmented rbtrees for Rik's vma gap finding code, and once you have them, it just makes sense to use them for interval trees as well, as this is the simpler and better known algorithm. prio trees, in comparison, seem *too* clever: they impose an additional 'heap' constraint on the tree, which they use to guarantee a faster worst-case complexity of O(k+log N) for stabbing queries in a well-balanced prio tree, vs O(k*log N) for interval trees (where k=number of matches, N=number of intervals). Now this sounds great, but in practice prio trees don't realize this theoretical benefit. First, the additional constraint makes them harder to update, so that the kernel implementation has to simplify things by balancing them like a radix tree, which is not always ideal. Second, the fact that there are both index and heap properties makes both tree manipulation and search more complex, which results in a higher multiplicative time constant. As it turns out, the simple interval tree algorithm ends up running faster than the more clever prio tree. This patch: Add two test modules: - prio_tree_test measures the performance of lib/prio_tree.c, both for insertion/removal and for stabbing searches - interval_tree_test measures the performance of a library of equivalent functionality, built using the augmented rbtree support. In order to support the second test module, lib/interval_tree.c is introduced. It is kept separate from the interval_tree_test main file for two reasons: first we don't want to provide an unfair advantage over prio_tree_test by having everything in a single compilation unit, and second there is the possibility that the interval tree functionality could get some non-test users in the kernel over time. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 12 ++++ lib/Makefile | 4 ++ lib/interval_tree.c | 159 ++++++++++++++++++++++++++++++++++++++++++ lib/interval_tree_test_main.c | 105 ++++++++++++++++++++++++++++ lib/prio_tree.c | 4 ++ lib/prio_tree_test.c | 106 ++++++++++++++++++++++++++++ 6 files changed, 390 insertions(+) create mode 100644 lib/interval_tree.c create mode 100644 lib/interval_tree_test_main.c create mode 100644 lib/prio_tree_test.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a4e5d93b0f41..ee9f030b6951 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1289,6 +1289,18 @@ config RBTREE_TEST A benchmark measuring the performance of the rbtree library. Also includes rbtree invariant checks.
+config PRIO_TREE_TEST + tristate "Prio tree test" + depends on m && DEBUG_KERNEL + help + A benchmark measuring the performance of the prio tree library + +config INTERVAL_TREE_TEST + tristate "Interval tree test" + depends on m && DEBUG_KERNEL + help + A benchmark measuring the performance of the interval tree library + config PROVIDE_OHCI1394_DMA_INIT bool "Remote debugging over FireWire early on boot" depends on PCI && X86 diff --git a/lib/Makefile b/lib/Makefile index f49445d26b65..26f578bf616a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -141,6 +141,10 @@ $(foreach file, $(libfdt_files), \ lib-$(CONFIG_LIBFDT) += $(libfdt_files) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o +obj-$(CONFIG_PRIO_TREE_TEST) += prio_tree_test.o +obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o + +interval_tree_test-objs := interval_tree_test_main.o interval_tree.o hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/interval_tree.c b/lib/interval_tree.c new file mode 100644 index 000000000000..6fd540b1e499 --- /dev/null +++ b/lib/interval_tree.c @@ -0,0 +1,159 @@ +#include +#include + +/* Callbacks for augmented rbtree insert and remove */ + +static inline unsigned long +compute_subtree_last(struct interval_tree_node *node) +{ + unsigned long max = node->last, subtree_last; + if (node->rb.rb_left) { + subtree_last = rb_entry(node->rb.rb_left, + struct interval_tree_node, rb)->__subtree_last; + if (max < subtree_last) + max = subtree_last; + } + if (node->rb.rb_right) { + subtree_last = rb_entry(node->rb.rb_right, + struct interval_tree_node, rb)->__subtree_last; + if (max < subtree_last) + max = subtree_last; + } + return max; +} + +RB_DECLARE_CALLBACKS(static, augment_callbacks, struct interval_tree_node, rb, + unsigned long, __subtree_last, compute_subtree_last) + +/* Insert / remove interval nodes from the tree */ + +void interval_tree_insert(struct interval_tree_node *node, + struct rb_root *root) +{ + struct rb_node **link = &root->rb_node, *rb_parent = NULL; + unsigned long start = node->start, last = node->last; + struct interval_tree_node *parent; + + while (*link) { + rb_parent = *link; + parent = rb_entry(rb_parent, struct interval_tree_node, rb); + if (parent->__subtree_last < last) + parent->__subtree_last = last; + if (start < parent->start) + link = &parent->rb.rb_left; + else + link = &parent->rb.rb_right; + } + + node->__subtree_last = last; + rb_link_node(&node->rb, rb_parent, link); + rb_insert_augmented(&node->rb, root, &augment_callbacks); +} + +void interval_tree_remove(struct interval_tree_node *node, + struct rb_root *root) +{ + rb_erase_augmented(&node->rb, root, &augment_callbacks); +} + +/* + * Iterate over intervals intersecting [start;last] + * + * Note that a node's interval intersects [start;last] iff: + * Cond1: node->start <= last + * and + * Cond2: start <= node->last + */ + +static struct interval_tree_node * +subtree_search(struct interval_tree_node *node, + unsigned long start, unsigned long last) +{ + while (true) { + /* + * Loop invariant: start <= node->__subtree_last + * (Cond2 is satisfied by one of the subtree nodes) + */ + if (node->rb.rb_left) { + struct interval_tree_node *left = + rb_entry(node->rb.rb_left, + struct interval_tree_node, rb); + if (start <= left->__subtree_last) { + /* + * Some nodes in left subtree satisfy Cond2. + * Iterate to find the leftmost such node N. + * If it also satisfies Cond1, that's the match + * we are looking for. 
Otherwise, there is no + * matching interval as nodes to the right of N + * can't satisfy Cond1 either. + */ + node = left; + continue; + } + } + if (node->start <= last) { /* Cond1 */ + if (start <= node->last) /* Cond2 */ + return node; /* node is leftmost match */ + if (node->rb.rb_right) { + node = rb_entry(node->rb.rb_right, + struct interval_tree_node, rb); + if (start <= node->__subtree_last) + continue; + } + } + return NULL; /* No match */ + } +} + +struct interval_tree_node * +interval_tree_iter_first(struct rb_root *root, + unsigned long start, unsigned long last) +{ + struct interval_tree_node *node; + + if (!root->rb_node) + return NULL; + node = rb_entry(root->rb_node, struct interval_tree_node, rb); + if (node->__subtree_last < start) + return NULL; + return subtree_search(node, start, last); +} + +struct interval_tree_node * +interval_tree_iter_next(struct interval_tree_node *node, + unsigned long start, unsigned long last) +{ + struct rb_node *rb = node->rb.rb_right, *prev; + + while (true) { + /* + * Loop invariants: + * Cond1: node->start <= last + * rb == node->rb.rb_right + * + * First, search right subtree if suitable + */ + if (rb) { + struct interval_tree_node *right = + rb_entry(rb, struct interval_tree_node, rb); + if (start <= right->__subtree_last) + return subtree_search(right, start, last); + } + + /* Move up the tree until we come from a node's left child */ + do { + rb = rb_parent(&node->rb); + if (!rb) + return NULL; + prev = &node->rb; + node = rb_entry(rb, struct interval_tree_node, rb); + rb = node->rb.rb_right; + } while (prev == rb); + + /* Check if the node intersects [start;last] */ + if (last < node->start) /* !Cond1 */ + return NULL; + else if (start <= node->last) /* Cond2 */ + return node; + } +} diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c new file mode 100644 index 000000000000..b25903987f7a --- /dev/null +++ b/lib/interval_tree_test_main.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include + +#define NODES 100 +#define PERF_LOOPS 100000 +#define SEARCHES 100 +#define SEARCH_LOOPS 10000 + +static struct rb_root root = RB_ROOT; +static struct interval_tree_node nodes[NODES]; +static u32 queries[SEARCHES]; + +static struct rnd_state rnd; + +static inline unsigned long +search(unsigned long query, struct rb_root *root) +{ + struct interval_tree_node *node; + unsigned long results = 0; + + for (node = interval_tree_iter_first(root, query, query); node; + node = interval_tree_iter_next(node, query, query)) + results++; + return results; +} + +static void init(void) +{ + int i; + for (i = 0; i < NODES; i++) { + u32 a = prandom32(&rnd), b = prandom32(&rnd); + if (a <= b) { + nodes[i].start = a; + nodes[i].last = b; + } else { + nodes[i].start = b; + nodes[i].last = a; + } + } + for (i = 0; i < SEARCHES; i++) + queries[i] = prandom32(&rnd); +} + +static int interval_tree_test_init(void) +{ + int i, j; + unsigned long results; + cycles_t time1, time2, time; + + printk(KERN_ALERT "interval tree insert/remove"); + + prandom32_seed(&rnd, 3141592653589793238ULL); + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + interval_tree_insert(nodes + j, &root); + for (j = 0; j < NODES; j++) + interval_tree_remove(nodes + j, &root); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + printk(" -> %llu cycles\n", (unsigned long long)time); + + printk(KERN_ALERT "interval tree search"); + + for (j = 0; j < NODES; j++) + 
interval_tree_insert(nodes + j, &root); + + time1 = get_cycles(); + + results = 0; + for (i = 0; i < SEARCH_LOOPS; i++) + for (j = 0; j < SEARCHES; j++) + results += search(queries[j], &root); + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, SEARCH_LOOPS); + results = div_u64(results, SEARCH_LOOPS); + printk(" -> %llu cycles (%lu results)\n", + (unsigned long long)time, results); + + return -EAGAIN; /* Fail will directly unload the module */ +} + +static void interval_tree_test_exit(void) +{ + printk(KERN_ALERT "test exit\n"); +} + +module_init(interval_tree_test_init) +module_exit(interval_tree_test_exit) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michel Lespinasse"); +MODULE_DESCRIPTION("Interval Tree test"); diff --git a/lib/prio_tree.c b/lib/prio_tree.c index 8d443af03b4c..4e0d2edff2b4 100644 --- a/lib/prio_tree.c +++ b/lib/prio_tree.c @@ -14,6 +14,7 @@ #include #include #include +#include /* * A clever mix of heap and radix trees forms a radix priority search tree (PST) @@ -241,6 +242,7 @@ struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, BUG(); return NULL; } +EXPORT_SYMBOL(prio_tree_insert); /* * Remove a prio_tree_node @node from a radix priority search tree @root. The @@ -290,6 +292,7 @@ void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node) while (cur != node) cur = prio_tree_replace(root, cur->parent, cur); } +EXPORT_SYMBOL(prio_tree_remove); static void iter_walk_down(struct prio_tree_iter *iter) { @@ -464,3 +467,4 @@ repeat: goto repeat; } +EXPORT_SYMBOL(prio_tree_next); diff --git a/lib/prio_tree_test.c b/lib/prio_tree_test.c new file mode 100644 index 000000000000..c26084ddc6a4 --- /dev/null +++ b/lib/prio_tree_test.c @@ -0,0 +1,106 @@ +#include +#include +#include +#include + +#define NODES 100 +#define PERF_LOOPS 100000 +#define SEARCHES 100 +#define SEARCH_LOOPS 10000 + +static struct prio_tree_root root; +static struct prio_tree_node nodes[NODES]; +static u32 queries[SEARCHES]; + +static struct rnd_state rnd; + +static inline unsigned long +search(unsigned long query, struct prio_tree_root *root) +{ + struct prio_tree_iter iter; + unsigned long results = 0; + + prio_tree_iter_init(&iter, root, query, query); + while (prio_tree_next(&iter)) + results++; + return results; +} + +static void init(void) +{ + int i; + for (i = 0; i < NODES; i++) { + u32 a = prandom32(&rnd), b = prandom32(&rnd); + if (a <= b) { + nodes[i].start = a; + nodes[i].last = b; + } else { + nodes[i].start = b; + nodes[i].last = a; + } + } + for (i = 0; i < SEARCHES; i++) + queries[i] = prandom32(&rnd); +} + +static int prio_tree_test_init(void) +{ + int i, j; + unsigned long results; + cycles_t time1, time2, time; + + printk(KERN_ALERT "prio tree insert/remove"); + + prandom32_seed(&rnd, 3141592653589793238ULL); + INIT_PRIO_TREE_ROOT(&root); + init(); + + time1 = get_cycles(); + + for (i = 0; i < PERF_LOOPS; i++) { + for (j = 0; j < NODES; j++) + prio_tree_insert(&root, nodes + j); + for (j = 0; j < NODES; j++) + prio_tree_remove(&root, nodes + j); + } + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, PERF_LOOPS); + printk(" -> %llu cycles\n", (unsigned long long)time); + + printk(KERN_ALERT "prio tree search"); + + for (j = 0; j < NODES; j++) + prio_tree_insert(&root, nodes + j); + + time1 = get_cycles(); + + results = 0; + for (i = 0; i < SEARCH_LOOPS; i++) + for (j = 0; j < SEARCHES; j++) + results += search(queries[j], &root); + + time2 = get_cycles(); + time = time2 - time1; + + time = div_u64(time, 
SEARCH_LOOPS); + results = div_u64(results, SEARCH_LOOPS); + printk(" -> %llu cycles (%lu results)\n", + (unsigned long long)time, results); + + return -EAGAIN; /* Fail will directly unload the module */ +} + +static void prio_tree_test_exit(void) +{ + printk(KERN_ALERT "test exit\n"); +} + +module_init(prio_tree_test_init) +module_exit(prio_tree_test_exit) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michel Lespinasse"); +MODULE_DESCRIPTION("Prio Tree test"); -- cgit From 6b2dbba8b6ac4df26f72eda1e5ea7bab9f950e08 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:25 -0700 Subject: mm: replace vma prio_tree with an interval tree Implement an interval tree as a replacement for the VMA prio_tree. The algorithms are similar to lib/interval_tree.c; however that code can't be directly reused as the interval endpoints are not explicitly stored in the VMA. So instead, the common algorithm is moved into a template and the details (node type, how to get interval endpoints from the node, etc.) are filled in using the C preprocessor. Once the interval tree functions are available, using them as a replacement for the VMA prio tree is a relatively simple, mechanical job. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/interval_tree.c | 166 ++++------------------------------------------------ lib/prio_tree.c | 19 +----- 2 files changed, 12 insertions(+), 173 deletions(-) (limited to 'lib') diff --git a/lib/interval_tree.c b/lib/interval_tree.c index 6fd540b1e499..77a793e0644b 100644 --- a/lib/interval_tree.c +++ b/lib/interval_tree.c @@ -1,159 +1,13 @@ #include #include -/* Callbacks for augmented rbtree insert and remove */ - -static inline unsigned long -compute_subtree_last(struct interval_tree_node *node) -{ - unsigned long max = node->last, subtree_last; - if (node->rb.rb_left) { - subtree_last = rb_entry(node->rb.rb_left, - struct interval_tree_node, rb)->__subtree_last; - if (max < subtree_last) - max = subtree_last; - } - if (node->rb.rb_right) { - subtree_last = rb_entry(node->rb.rb_right, - struct interval_tree_node, rb)->__subtree_last; - if (max < subtree_last) - max = subtree_last; - } - return max; -} - -RB_DECLARE_CALLBACKS(static, augment_callbacks, struct interval_tree_node, rb, - unsigned long, __subtree_last, compute_subtree_last) - -/* Insert / remove interval nodes from the tree */ - -void interval_tree_insert(struct interval_tree_node *node, - struct rb_root *root) -{ - struct rb_node **link = &root->rb_node, *rb_parent = NULL; - unsigned long start = node->start, last = node->last; - struct interval_tree_node *parent; - - while (*link) { - rb_parent = *link; - parent = rb_entry(rb_parent, struct interval_tree_node, rb); - if (parent->__subtree_last < last) - parent->__subtree_last = last; - if (start < parent->start) - link = &parent->rb.rb_left; - else - link = &parent->rb.rb_right; - } - - node->__subtree_last = last; - rb_link_node(&node->rb, rb_parent, link); - rb_insert_augmented(&node->rb, root, &augment_callbacks); -} - -void interval_tree_remove(struct interval_tree_node *node, - struct rb_root *root) -{ - rb_erase_augmented(&node->rb, root, &augment_callbacks); -} - -/* - * Iterate over intervals intersecting [start;last] - * - * Note that a node's interval intersects [start;last] iff: - * Cond1: node->start <= last - * and - * Cond2: start <= node->last - */ - -static struct
interval_tree_node * -subtree_search(struct interval_tree_node *node, - unsigned long start, unsigned long last) -{ - while (true) { - /* - * Loop invariant: start <= node->__subtree_last - * (Cond2 is satisfied by one of the subtree nodes) - */ - if (node->rb.rb_left) { - struct interval_tree_node *left = - rb_entry(node->rb.rb_left, - struct interval_tree_node, rb); - if (start <= left->__subtree_last) { - /* - * Some nodes in left subtree satisfy Cond2. - * Iterate to find the leftmost such node N. - * If it also satisfies Cond1, that's the match - * we are looking for. Otherwise, there is no - * matching interval as nodes to the right of N - * can't satisfy Cond1 either. - */ - node = left; - continue; - } - } - if (node->start <= last) { /* Cond1 */ - if (start <= node->last) /* Cond2 */ - return node; /* node is leftmost match */ - if (node->rb.rb_right) { - node = rb_entry(node->rb.rb_right, - struct interval_tree_node, rb); - if (start <= node->__subtree_last) - continue; - } - } - return NULL; /* No match */ - } -} - -struct interval_tree_node * -interval_tree_iter_first(struct rb_root *root, - unsigned long start, unsigned long last) -{ - struct interval_tree_node *node; - - if (!root->rb_node) - return NULL; - node = rb_entry(root->rb_node, struct interval_tree_node, rb); - if (node->__subtree_last < start) - return NULL; - return subtree_search(node, start, last); -} - -struct interval_tree_node * -interval_tree_iter_next(struct interval_tree_node *node, - unsigned long start, unsigned long last) -{ - struct rb_node *rb = node->rb.rb_right, *prev; - - while (true) { - /* - * Loop invariants: - * Cond1: node->start <= last - * rb == node->rb.rb_right - * - * First, search right subtree if suitable - */ - if (rb) { - struct interval_tree_node *right = - rb_entry(rb, struct interval_tree_node, rb); - if (start <= right->__subtree_last) - return subtree_search(right, start, last); - } - - /* Move up the tree until we come from a node's left child */ - do { - rb = rb_parent(&node->rb); - if (!rb) - return NULL; - prev = &node->rb; - node = rb_entry(rb, struct interval_tree_node, rb); - rb = node->rb.rb_right; - } while (prev == rb); - - /* Check if the node intersects [start;last] */ - if (last < node->start) /* !Cond1 */ - return NULL; - else if (start <= node->last) /* Cond2 */ - return node; - } -} +#define ITSTRUCT struct interval_tree_node +#define ITRB rb +#define ITTYPE unsigned long +#define ITSUBTREE __subtree_last +#define ITSTART(n) ((n)->start) +#define ITLAST(n) ((n)->last) +#define ITSTATIC +#define ITPREFIX interval_tree + +#include diff --git a/lib/prio_tree.c b/lib/prio_tree.c index 4e0d2edff2b4..bba37148c15e 100644 --- a/lib/prio_tree.c +++ b/lib/prio_tree.c @@ -44,27 +44,12 @@ * The following macros are used for implementing prio_tree for i_mmap */ -#define RADIX_INDEX(vma) ((vma)->vm_pgoff) -#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT) -/* avoid overflow */ -#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1)) - - static void get_index(const struct prio_tree_root *root, const struct prio_tree_node *node, unsigned long *radix, unsigned long *heap) { - if (root->raw) { - struct vm_area_struct *vma = prio_tree_entry( - node, struct vm_area_struct, shared.prio_tree_node); - - *radix = RADIX_INDEX(vma); - *heap = HEAP_INDEX(vma); - } - else { - *radix = node->start; - *heap = node->last; - } + *radix = node->start; + *heap = node->last; } static unsigned long index_bits_to_maxindex[BITS_PER_LONG]; -- cgit From 
147e615f83c2c36caf89e7a3bf78090ade6f266c Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:30 -0700 Subject: prio_tree: remove After both prio_tree users have been converted to use red-black trees, there is no need to keep around the prio tree library anymore. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 6 - lib/Makefile | 3 +- lib/prio_tree.c | 455 --------------------------------------------------- lib/prio_tree_test.c | 106 ------------ 4 files changed, 1 insertion(+), 569 deletions(-) delete mode 100644 lib/prio_tree.c delete mode 100644 lib/prio_tree_test.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ee9f030b6951..a6e7e7741523 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1289,12 +1289,6 @@ config RBTREE_TEST A benchmark measuring the performance of the rbtree library. Also includes rbtree invariant checks. -config PRIO_TREE_TEST - tristate "Prio tree test" - depends on m && DEBUG_KERNEL - help - A benchmark measuring the performance of the prio tree library - config INTERVAL_TREE_TEST tristate "Interval tree test" depends on m && DEBUG_KERNEL diff --git a/lib/Makefile b/lib/Makefile index 26f578bf616a..3128e357e286 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -9,7 +9,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ - idr.o int_sqrt.o extable.o prio_tree.o \ + idr.o int_sqrt.o extable.o \ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o @@ -141,7 +141,6 @@ $(foreach file, $(libfdt_files), \ lib-$(CONFIG_LIBFDT) += $(libfdt_files) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o -obj-$(CONFIG_PRIO_TREE_TEST) += prio_tree_test.o obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o interval_tree_test-objs := interval_tree_test_main.o interval_tree.o diff --git a/lib/prio_tree.c b/lib/prio_tree.c deleted file mode 100644 index bba37148c15e..000000000000 --- a/lib/prio_tree.c +++ /dev/null @@ -1,455 +0,0 @@ -/* - * lib/prio_tree.c - priority search tree - * - * Copyright (C) 2004, Rajesh Venkatasubramanian - * - * This file is released under the GPL v2. - * - * Based on the radix priority search tree proposed by Edward M. McCreight - * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985 - * - * 02Feb2004 Initial version - */ - -#include -#include -#include -#include - -/* - * A clever mix of heap and radix trees forms a radix priority search tree (PST) - * which is useful for storing intervals, e.g, we can consider a vma as a closed - * interval of file pages [offset_begin, offset_end], and store all vmas that - * map a file in a PST. Then, using the PST, we can answer a stabbing query, - * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a - * given input interval X (a set of consecutive file pages), in "O(log n + m)" - * time where 'log n' is the height of the PST, and 'm' is the number of stored - * intervals (vmas) that overlap (map) with the input interval X (the set of - * consecutive file pages). - * - * In our implementation, we store closed intervals of the form [radix_index, - * heap_index]. We assume that always radix_index <= heap_index. 
McCreight's PST - * is designed for storing intervals with unique radix indices, i.e., each - * interval have different radix_index. However, this limitation can be easily - * overcome by using the size, i.e., heap_index - radix_index, as part of the - * index, so we index the tree using [(radix_index,size), heap_index]. - * - * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit - * machine, the maximum height of a PST can be 64. We can use a balanced version - * of the priority search tree to optimize the tree height, but the balanced - * tree proposed by McCreight is too complex and memory-hungry for our purpose. - */ - -/* - * The following macros are used for implementing prio_tree for i_mmap - */ - -static void get_index(const struct prio_tree_root *root, - const struct prio_tree_node *node, - unsigned long *radix, unsigned long *heap) -{ - *radix = node->start; - *heap = node->last; -} - -static unsigned long index_bits_to_maxindex[BITS_PER_LONG]; - -void __init prio_tree_init(void) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++) - index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1; - index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL; -} - -/* - * Maximum heap_index that can be stored in a PST with index_bits bits - */ -static inline unsigned long prio_tree_maxindex(unsigned int bits) -{ - return index_bits_to_maxindex[bits - 1]; -} - -static void prio_set_parent(struct prio_tree_node *parent, - struct prio_tree_node *child, bool left) -{ - if (left) - parent->left = child; - else - parent->right = child; - - child->parent = parent; -} - -/* - * Extend a priority search tree so that it can store a node with heap_index - * max_heap_index. In the worst case, this algorithm takes O((log n)^2). - * However, this function is used rarely and the common case performance is - * not bad. - */ -static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root, - struct prio_tree_node *node, unsigned long max_heap_index) -{ - struct prio_tree_node *prev; - - if (max_heap_index > prio_tree_maxindex(root->index_bits)) - root->index_bits++; - - prev = node; - INIT_PRIO_TREE_NODE(node); - - while (max_heap_index > prio_tree_maxindex(root->index_bits)) { - struct prio_tree_node *tmp = root->prio_tree_node; - - root->index_bits++; - - if (prio_tree_empty(root)) - continue; - - prio_tree_remove(root, root->prio_tree_node); - INIT_PRIO_TREE_NODE(tmp); - - prio_set_parent(prev, tmp, true); - prev = tmp; - } - - if (!prio_tree_empty(root)) - prio_set_parent(prev, root->prio_tree_node, true); - - root->prio_tree_node = node; - return node; -} - -/* - * Replace a prio_tree_node with a new node and return the old node - */ -struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, - struct prio_tree_node *old, struct prio_tree_node *node) -{ - INIT_PRIO_TREE_NODE(node); - - if (prio_tree_root(old)) { - BUG_ON(root->prio_tree_node != old); - /* - * We can reduce root->index_bits here. However, it is complex - * and does not help much to improve performance (IMO). - */ - root->prio_tree_node = node; - } else - prio_set_parent(old->parent, node, old->parent->left == old); - - if (!prio_tree_left_empty(old)) - prio_set_parent(node, old->left, true); - - if (!prio_tree_right_empty(old)) - prio_set_parent(node, old->right, false); - - return old; -} - -/* - * Insert a prio_tree_node @node into a radix priority search tree @root. 
The - * algorithm typically takes O(log n) time where 'log n' is the number of bits - * required to represent the maximum heap_index. In the worst case, the algo - * can take O((log n)^2) - check prio_tree_expand. - * - * If a prior node with same radix_index and heap_index is already found in - * the tree, then returns the address of the prior node. Otherwise, inserts - * @node into the tree and returns @node. - */ -struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, - struct prio_tree_node *node) -{ - struct prio_tree_node *cur, *res = node; - unsigned long radix_index, heap_index; - unsigned long r_index, h_index, index, mask; - int size_flag = 0; - - get_index(root, node, &radix_index, &heap_index); - - if (prio_tree_empty(root) || - heap_index > prio_tree_maxindex(root->index_bits)) - return prio_tree_expand(root, node, heap_index); - - cur = root->prio_tree_node; - mask = 1UL << (root->index_bits - 1); - - while (mask) { - get_index(root, cur, &r_index, &h_index); - - if (r_index == radix_index && h_index == heap_index) - return cur; - - if (h_index < heap_index || - (h_index == heap_index && r_index > radix_index)) { - struct prio_tree_node *tmp = node; - node = prio_tree_replace(root, cur, node); - cur = tmp; - /* swap indices */ - index = r_index; - r_index = radix_index; - radix_index = index; - index = h_index; - h_index = heap_index; - heap_index = index; - } - - if (size_flag) - index = heap_index - radix_index; - else - index = radix_index; - - if (index & mask) { - if (prio_tree_right_empty(cur)) { - INIT_PRIO_TREE_NODE(node); - prio_set_parent(cur, node, false); - return res; - } else - cur = cur->right; - } else { - if (prio_tree_left_empty(cur)) { - INIT_PRIO_TREE_NODE(node); - prio_set_parent(cur, node, true); - return res; - } else - cur = cur->left; - } - - mask >>= 1; - - if (!mask) { - mask = 1UL << (BITS_PER_LONG - 1); - size_flag = 1; - } - } - /* Should not reach here */ - BUG(); - return NULL; -} -EXPORT_SYMBOL(prio_tree_insert); - -/* - * Remove a prio_tree_node @node from a radix priority search tree @root. The - * algorithm takes O(log n) time where 'log n' is the number of bits required - * to represent the maximum heap_index. 
- */ -void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node) -{ - struct prio_tree_node *cur; - unsigned long r_index, h_index_right, h_index_left; - - cur = node; - - while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) { - if (!prio_tree_left_empty(cur)) - get_index(root, cur->left, &r_index, &h_index_left); - else { - cur = cur->right; - continue; - } - - if (!prio_tree_right_empty(cur)) - get_index(root, cur->right, &r_index, &h_index_right); - else { - cur = cur->left; - continue; - } - - /* both h_index_left and h_index_right cannot be 0 */ - if (h_index_left >= h_index_right) - cur = cur->left; - else - cur = cur->right; - } - - if (prio_tree_root(cur)) { - BUG_ON(root->prio_tree_node != cur); - __INIT_PRIO_TREE_ROOT(root, root->raw); - return; - } - - if (cur->parent->right == cur) - cur->parent->right = cur->parent; - else - cur->parent->left = cur->parent; - - while (cur != node) - cur = prio_tree_replace(root, cur->parent, cur); -} -EXPORT_SYMBOL(prio_tree_remove); - -static void iter_walk_down(struct prio_tree_iter *iter) -{ - iter->mask >>= 1; - if (iter->mask) { - if (iter->size_level) - iter->size_level++; - return; - } - - if (iter->size_level) { - BUG_ON(!prio_tree_left_empty(iter->cur)); - BUG_ON(!prio_tree_right_empty(iter->cur)); - iter->size_level++; - iter->mask = ULONG_MAX; - } else { - iter->size_level = 1; - iter->mask = 1UL << (BITS_PER_LONG - 1); - } -} - -static void iter_walk_up(struct prio_tree_iter *iter) -{ - if (iter->mask == ULONG_MAX) - iter->mask = 1UL; - else if (iter->size_level == 1) - iter->mask = 1UL; - else - iter->mask <<= 1; - if (iter->size_level) - iter->size_level--; - if (!iter->size_level && (iter->value & iter->mask)) - iter->value ^= iter->mask; -} - -/* - * Following functions help to enumerate all prio_tree_nodes in the tree that - * overlap with the input interval X [radix_index, heap_index]. The enumeration - * takes O(log n + m) time where 'log n' is the height of the tree (which is - * proportional to # of bits required to represent the maximum heap_index) and - * 'm' is the number of prio_tree_nodes that overlap the interval X. 
- */ - -static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter, - unsigned long *r_index, unsigned long *h_index) -{ - if (prio_tree_left_empty(iter->cur)) - return NULL; - - get_index(iter->root, iter->cur->left, r_index, h_index); - - if (iter->r_index <= *h_index) { - iter->cur = iter->cur->left; - iter_walk_down(iter); - return iter->cur; - } - - return NULL; -} - -static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter, - unsigned long *r_index, unsigned long *h_index) -{ - unsigned long value; - - if (prio_tree_right_empty(iter->cur)) - return NULL; - - if (iter->size_level) - value = iter->value; - else - value = iter->value | iter->mask; - - if (iter->h_index < value) - return NULL; - - get_index(iter->root, iter->cur->right, r_index, h_index); - - if (iter->r_index <= *h_index) { - iter->cur = iter->cur->right; - iter_walk_down(iter); - return iter->cur; - } - - return NULL; -} - -static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter) -{ - iter->cur = iter->cur->parent; - iter_walk_up(iter); - return iter->cur; -} - -static inline int overlap(struct prio_tree_iter *iter, - unsigned long r_index, unsigned long h_index) -{ - return iter->h_index >= r_index && iter->r_index <= h_index; -} - -/* - * prio_tree_first: - * - * Get the first prio_tree_node that overlaps with the interval [radix_index, - * heap_index]. Note that always radix_index <= heap_index. We do a pre-order - * traversal of the tree. - */ -static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter) -{ - struct prio_tree_root *root; - unsigned long r_index, h_index; - - INIT_PRIO_TREE_ITER(iter); - - root = iter->root; - if (prio_tree_empty(root)) - return NULL; - - get_index(root, root->prio_tree_node, &r_index, &h_index); - - if (iter->r_index > h_index) - return NULL; - - iter->mask = 1UL << (root->index_bits - 1); - iter->cur = root->prio_tree_node; - - while (1) { - if (overlap(iter, r_index, h_index)) - return iter->cur; - - if (prio_tree_left(iter, &r_index, &h_index)) - continue; - - if (prio_tree_right(iter, &r_index, &h_index)) - continue; - - break; - } - return NULL; -} - -/* - * prio_tree_next: - * - * Get the next prio_tree_node that overlaps with the input interval in iter - */ -struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter) -{ - unsigned long r_index, h_index; - - if (iter->cur == NULL) - return prio_tree_first(iter); - -repeat: - while (prio_tree_left(iter, &r_index, &h_index)) - if (overlap(iter, r_index, h_index)) - return iter->cur; - - while (!prio_tree_right(iter, &r_index, &h_index)) { - while (!prio_tree_root(iter->cur) && - iter->cur->parent->right == iter->cur) - prio_tree_parent(iter); - - if (prio_tree_root(iter->cur)) - return NULL; - - prio_tree_parent(iter); - } - - if (overlap(iter, r_index, h_index)) - return iter->cur; - - goto repeat; -} -EXPORT_SYMBOL(prio_tree_next); diff --git a/lib/prio_tree_test.c b/lib/prio_tree_test.c deleted file mode 100644 index c26084ddc6a4..000000000000 --- a/lib/prio_tree_test.c +++ /dev/null @@ -1,106 +0,0 @@ -#include <linux/module.h> -#include <linux/prio_tree.h> -#include <linux/random.h> -#include <asm/timex.h> - -#define NODES 100 -#define PERF_LOOPS 100000 -#define SEARCHES 100 -#define SEARCH_LOOPS 10000 - -static struct prio_tree_root root; -static struct prio_tree_node nodes[NODES]; -static u32 queries[SEARCHES]; - -static struct rnd_state rnd; - -static inline unsigned long -search(unsigned long query, struct prio_tree_root *root) -{ - struct prio_tree_iter iter; - unsigned long results = 0; - -
prio_tree_iter_init(&iter, root, query, query); - while (prio_tree_next(&iter)) - results++; - return results; -} - -static void init(void) -{ - int i; - for (i = 0; i < NODES; i++) { - u32 a = prandom32(&rnd), b = prandom32(&rnd); - if (a <= b) { - nodes[i].start = a; - nodes[i].last = b; - } else { - nodes[i].start = b; - nodes[i].last = a; - } - } - for (i = 0; i < SEARCHES; i++) - queries[i] = prandom32(&rnd); -} - -static int prio_tree_test_init(void) -{ - int i, j; - unsigned long results; - cycles_t time1, time2, time; - - printk(KERN_ALERT "prio tree insert/remove"); - - prandom32_seed(&rnd, 3141592653589793238ULL); - INIT_PRIO_TREE_ROOT(&root); - init(); - - time1 = get_cycles(); - - for (i = 0; i < PERF_LOOPS; i++) { - for (j = 0; j < NODES; j++) - prio_tree_insert(&root, nodes + j); - for (j = 0; j < NODES; j++) - prio_tree_remove(&root, nodes + j); - } - - time2 = get_cycles(); - time = time2 - time1; - - time = div_u64(time, PERF_LOOPS); - printk(" -> %llu cycles\n", (unsigned long long)time); - - printk(KERN_ALERT "prio tree search"); - - for (j = 0; j < NODES; j++) - prio_tree_insert(&root, nodes + j); - - time1 = get_cycles(); - - results = 0; - for (i = 0; i < SEARCH_LOOPS; i++) - for (j = 0; j < SEARCHES; j++) - results += search(queries[j], &root); - - time2 = get_cycles(); - time = time2 - time1; - - time = div_u64(time, SEARCH_LOOPS); - results = div_u64(results, SEARCH_LOOPS); - printk(" -> %llu cycles (%lu results)\n", - (unsigned long long)time, results); - - return -EAGAIN; /* Fail will directly unload the module */ -} - -static void prio_tree_test_exit(void) -{ - printk(KERN_ALERT "test exit\n"); -} - -module_init(prio_tree_test_init) -module_exit(prio_tree_test_exit) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Michel Lespinasse"); -MODULE_DESCRIPTION("Prio Tree test"); -- cgit From 9c079add0d0f45220f4bb37febf0621137ec2d38 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:33 -0700 Subject: rbtree: move augmented rbtree functionality to rbtree_augmented.h Provide rb_insert_augmented() and rb_erase_augmented() through a new rbtree_augmented.h include file. rb_erase_augmented() is defined there as an __always_inline function, in order to allow inlining of augmented rbtree callbacks into it. Since this generates a relatively large function, each augmented rbtree user should make sure to have a single call site. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/rbtree.c | 162 ++++-------------------------------------------- lib/rbtree_test.c | 2 +- 2 files changed, 12 insertions(+), 152 deletions(-) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index c0088ca345f9..4f56a11d67fa 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -21,7 +21,7 @@ linux/lib/rbtree.c */ -#include <linux/rbtree.h> +#include <linux/rbtree_augmented.h> #include <linux/export.h> /* @@ -44,52 +44,16 @@ * parentheses and have some accompanying text comment.
*/ -#define RB_RED 0 -#define RB_BLACK 1 - -#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) - -#define __rb_color(pc) ((pc) & 1) -#define __rb_is_black(pc) __rb_color(pc) -#define __rb_is_red(pc) (!__rb_color(pc)) -#define rb_color(rb) __rb_color((rb)->__rb_parent_color) -#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) -#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) - static inline void rb_set_black(struct rb_node *rb) { rb->__rb_parent_color |= RB_BLACK; } -static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) -{ - rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; -} - -static inline void rb_set_parent_color(struct rb_node *rb, - struct rb_node *p, int color) -{ - rb->__rb_parent_color = (unsigned long)p | color; -} - static inline struct rb_node *rb_red_parent(struct rb_node *red) { return (struct rb_node *)red->__rb_parent_color; } -static inline void -__rb_change_child(struct rb_node *old, struct rb_node *new, - struct rb_node *parent, struct rb_root *root) -{ - if (parent) { - if (parent->rb_left == old) - parent->rb_left = new; - else - parent->rb_right = new; - } else - root->rb_node = new; -} - /* * Helper function for rotations: * - old's parent and color get assigned to new @@ -230,9 +194,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root, } } -static __always_inline void +__always_inline void __rb_erase_color(struct rb_node *parent, struct rb_root *root, - const struct rb_augment_callbacks *augment) + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; @@ -261,7 +225,7 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); - augment->rotate(parent, sibling); + augment_rotate(parent, sibling); sibling = tmp1; } tmp1 = sibling->rb_right; @@ -313,7 +277,7 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); - augment->rotate(sibling, tmp2); + augment_rotate(sibling, tmp2); tmp1 = sibling; sibling = tmp2; } @@ -336,7 +300,7 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, rb_set_parent(tmp2, parent); __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); - augment->rotate(parent, sibling); + augment_rotate(parent, sibling); break; } else { sibling = parent->rb_left; @@ -347,7 +311,7 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); - augment->rotate(parent, sibling); + augment_rotate(parent, sibling); sibling = tmp1; } tmp1 = sibling->rb_left; @@ -374,7 +338,7 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); - augment->rotate(sibling, tmp2); + augment_rotate(sibling, tmp2); tmp1 = sibling; sibling = tmp2; } @@ -386,109 +350,12 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, rb_set_parent(tmp2, parent); __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); - augment->rotate(parent, sibling); + augment_rotate(parent, sibling); break; } } } - -static __always_inline void -__rb_erase(struct rb_node *node, struct rb_root *root, - const struct rb_augment_callbacks *augment) -{ - struct rb_node *child = node->rb_right, *tmp = node->rb_left; - struct rb_node *parent, *rebalance; - unsigned long pc; - - if (!tmp) { - /* - * Case 1: node to erase has no 
more than 1 child (easy!) - * - * Note that if there is one child it must be red due to 5) - * and node must be black due to 4). We adjust colors locally - * so as to bypass __rb_erase_color() later on. - */ - pc = node->__rb_parent_color; - parent = __rb_parent(pc); - __rb_change_child(node, child, parent, root); - if (child) { - child->__rb_parent_color = pc; - rebalance = NULL; - } else - rebalance = __rb_is_black(pc) ? parent : NULL; - tmp = parent; - } else if (!child) { - /* Still case 1, but this time the child is node->rb_left */ - tmp->__rb_parent_color = pc = node->__rb_parent_color; - parent = __rb_parent(pc); - __rb_change_child(node, tmp, parent, root); - rebalance = NULL; - tmp = parent; - } else { - struct rb_node *successor = child, *child2; - tmp = child->rb_left; - if (!tmp) { - /* - * Case 2: node's successor is its right child - * - * (n) (s) - * / \ / \ - * (x) (s) -> (x) (c) - * \ - * (c) - */ - parent = successor; - child2 = successor->rb_right; - augment->copy(node, successor); - } else { - /* - * Case 3: node's successor is leftmost under - * node's right child subtree - * - * (n) (s) - * / \ / \ - * (x) (y) -> (x) (y) - * / / - * (p) (p) - * / / - * (s) (c) - * \ - * (c) - */ - do { - parent = successor; - successor = tmp; - tmp = tmp->rb_left; - } while (tmp); - parent->rb_left = child2 = successor->rb_right; - successor->rb_right = child; - rb_set_parent(child, successor); - augment->copy(node, successor); - augment->propagate(parent, successor); - } - - successor->rb_left = tmp = node->rb_left; - rb_set_parent(tmp, successor); - - pc = node->__rb_parent_color; - tmp = __rb_parent(pc); - __rb_change_child(node, successor, tmp, root); - if (child2) { - successor->__rb_parent_color = pc; - rb_set_parent_color(child2, parent, RB_BLACK); - rebalance = NULL; - } else { - unsigned long pc2 = successor->__rb_parent_color; - successor->__rb_parent_color = pc; - rebalance = __rb_is_black(pc2) ? parent : NULL; - } - tmp = successor; - } - - augment->propagate(tmp, NULL); - if (rebalance) - __rb_erase_color(rebalance, root, augment); -} +EXPORT_SYMBOL(__rb_erase_color); /* * Non-augmented rbtree manipulation functions. @@ -513,7 +380,7 @@ EXPORT_SYMBOL(rb_insert_color); void rb_erase(struct rb_node *node, struct rb_root *root) { - __rb_erase(node, root, &dummy_callbacks); + rb_erase_augmented(node, root, &dummy_callbacks); } EXPORT_SYMBOL(rb_erase); @@ -531,13 +398,6 @@ void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, } EXPORT_SYMBOL(__rb_insert_augmented); -void rb_erase_augmented(struct rb_node *node, struct rb_root *root, - const struct rb_augment_callbacks *augment) -{ - __rb_erase(node, root, augment); -} -EXPORT_SYMBOL(rb_erase_augmented); - /* * This function returns the first node (in sort order) of the tree. */ diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index b20e99969b0f..268b23951fec 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -1,5 +1,5 @@ #include <linux/module.h> -#include <linux/rbtree.h> +#include <linux/rbtree_augmented.h> #include <linux/random.h> #include <asm/timex.h> -- cgit From 9826a516ff77c5820e591211e4f3e58ff36f46be Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:35 -0700 Subject: mm: interval tree updates Update the generic interval tree code that was introduced in "mm: replace vma prio_tree with an interval tree".
Changes: - fixed 'endpoing' typo noticed by Andrew Morton - replaced include/linux/interval_tree_tmpl.h, which was used as a template (including it automatically defined the interval tree functions) with include/linux/interval_tree_generic.h, which only defines a preprocessor macro INTERVAL_TREE_DEFINE(), which itself defines the interval tree functions when invoked. Now that is a very long macro which is unfortunate, but it does make the usage sites (lib/interval_tree.c and mm/interval_tree.c) a bit nicer than previously. - make use of RB_DECLARE_CALLBACKS() in the INTERVAL_TREE_DEFINE() macro, instead of duplicating that code in the interval tree template. - replaced vma_interval_tree_add(), which was actually handling the nonlinear and interval tree cases, with vma_interval_tree_insert_after() which handles only the interval tree case and has an API that is more consistent with the other interval tree handling functions. The nonlinear case is now handled explicitly in kernel/fork.c dup_mmap(). Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/interval_tree.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/interval_tree.c b/lib/interval_tree.c index 77a793e0644b..e6eb406f2d65 100644 --- a/lib/interval_tree.c +++ b/lib/interval_tree.c @@ -1,13 +1,10 @@ #include <linux/init.h> #include <linux/interval_tree.h> +#include <linux/interval_tree_generic.h> -#define ITSTRUCT struct interval_tree_node -#define ITRB rb -#define ITTYPE unsigned long -#define ITSUBTREE __subtree_last -#define ITSTART(n) ((n)->start) -#define ITLAST(n) ((n)->last) -#define ITSTATIC -#define ITPREFIX interval_tree +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) -#include <linux/interval_tree_tmpl.h> +INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, + unsigned long, __subtree_last, + START, LAST,, interval_tree) -- cgit From ed8ea8150182f8d715fceb3b175ef0a9ebacd872 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:45 -0700 Subject: mm: add CONFIG_DEBUG_VM_RB build option Add a CONFIG_DEBUG_VM_RB build option for the previously existing DEBUG_MM_RB code. Now that Andi Kleen modified it to avoid using recursive algorithms, we can expose it a bit more. Also extend this code to validate_mm() after stack expansion, and to check that the vma's start and last pgoffs have not changed since the nodes were inserted on the anon vma interval tree (as it is important that the nodes be reindexed after each such update). Signed-off-by: Michel Lespinasse Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Peter Zijlstra Cc: Daniel Santos Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a6e7e7741523..28e9d6c98941 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -798,6 +798,15 @@ config DEBUG_VM If unsure, say N. +config DEBUG_VM_RB + bool "Debug VM red-black trees" + depends on DEBUG_VM + help + Enable this to turn on more extended checks in the virtual-memory + system that may impact performance. + + If unsure, say N. + config DEBUG_VIRTUAL bool "Debug VM translations" depends on DEBUG_KERNEL && X86 -- cgit
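For context on the INTERVAL_TREE_DEFINE() usage above: the macro's final argument becomes the prefix of the generated functions, so the invocation in lib/interval_tree.c produces interval_tree_insert(), interval_tree_remove(), interval_tree_iter_first() and interval_tree_iter_next(). Below is a minimal caller sketch, not part of the patches above; the example_tree root and the helpers example_add() and count_overlaps() are invented for illustration, while the interval_tree_* signatures are as generated by the macro at this point in the series.

#include <linux/interval_tree.h>

/* Invented example state: a tree of closed intervals [start, last]. */
static struct rb_root example_tree = RB_ROOT;

static void example_add(struct interval_tree_node *node,
			unsigned long start, unsigned long last)
{
	/* Endpoints must be set by the caller before insertion. */
	node->start = start;
	node->last = last;
	interval_tree_insert(node, &example_tree);
}

/* Count every stored interval that overlaps the query [start, last]. */
static unsigned long count_overlaps(unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;
	unsigned long hits = 0;

	for (node = interval_tree_iter_first(&example_tree, start, last);
	     node;
	     node = interval_tree_iter_next(node, start, last))
		hits++;

	return hits;
}

This mirrors the search() loop in the deleted prio_tree_test.c above: the iterator yields, one at a time, every node whose interval intersects the query.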