-rw-r--r--  MAINTAINERS                                                |   1
-rw-r--r--  arch/x86/include/asm/cpumask.h                             |   2
-rw-r--r--  drivers/android/binder/process.rs                          |  64
-rw-r--r--  drivers/clk/at91/clk-peripheral.c                          |   1
-rw-r--r--  drivers/clk/at91/pmc.h                                     |   3
-rw-r--r--  drivers/clk/renesas/clk-div6.c                             |   6
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c                        |  15
-rw-r--r--  drivers/clk/renesas/rcar-gen4-cpg.c                        |   9
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c   |   8
-rw-r--r--  drivers/edac/ie31200_edac.c                                |   4
-rw-r--r--  drivers/gpio/gpio-aspeed.c                                 |   5
-rw-r--r--  drivers/iio/dac/ad3530r.c                                  |   3
-rw-r--r--  drivers/iio/temperature/mlx90614.c                         |   5
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-ma35.c                     |   4
-rw-r--r--  drivers/soc/renesas/renesas-soc.c                          |   4
-rw-r--r--  drivers/soc/renesas/rz-sysc.c                              |   3
-rw-r--r--  include/linux/bitfield.h                                   |  95
-rw-r--r--  include/linux/cpumask.h                                    |  10
-rw-r--r--  include/linux/nodemask.h                                   |   9
-rw-r--r--  lib/hweight.c                                              |   4
-rw-r--r--  rust/kernel/bitmap.rs                                      |  43
-rw-r--r--  rust/kernel/id_pool.rs                                     | 141
-rw-r--r--  sound/usb/mixer_quirks.c                                   |   4
23 files changed, 302 insertions, 141 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 05db8b2df438..fdc7368de809 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4432,6 +4432,7 @@ F: arch/*/lib/bitops.c
F: include/asm-generic/bitops
F: include/asm-generic/bitops.h
F: include/linux/bitops.h
+F: lib/hweight.c
F: lib/test_bitops.c
F: tools/*/bitops*
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index 70f6b60ad67b..9df9e9cde670 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLER__
+
+#include <linux/compiler.h>
#include <linux/cpumask.h>
extern void setup_cpu_local_masks(void);
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index e5237e9ec552..ac981614544e 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -19,6 +19,7 @@ use kernel::{
cred::Credential,
error::Error,
fs::file::{self, File},
+ id_pool::IdPool,
list::{List, ListArc, ListArcField, ListLinks},
mm,
prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
struct ProcessNodeRefs {
/// Used to look up nodes using the 32-bit id that this process knows it by.
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
/// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
/// the underlying `Node` struct as returned by `Node::global_id`.
by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
fn new() -> Self {
Self {
by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
by_node: RBTree::new(),
freeze_listeners: RBTree::new(),
}
@@ -802,7 +806,7 @@ impl Process {
pub(crate) fn insert_or_update_handle(
self: ArcBorrow<'_, Process>,
node_ref: NodeRef,
- is_mananger: bool,
+ is_manager: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
- let mut refs = self.node_refs.lock();
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
// Do a lookup again as node may have been inserted before the lock was reacquired.
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
return Ok(handle);
}
- // Find id.
- let mut target: u32 = if is_mananger { 0 } else { 1 };
- for handle in refs.by_handle.keys() {
- if *handle > target {
- break;
- }
- if *handle == target {
- target = target.checked_add(1).ok_or(ENOMEM)?;
- }
- }
-
let gid = node_ref.node.global_id();
let (info_proc, info_node) = {
- let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
match info.pin_init_with(info_init) {
Ok(info) => ListArc::pair_from_pin_unique(info),
// error is infallible
@@ -865,9 +884,10 @@ impl Process {
// `info_node` into the right node's `refs` list.
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
- refs.by_node.insert(reserve1.into_node(gid, target));
- refs.by_handle.insert(reserve2.into_node(target, info_proc));
- Ok(target)
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
let id = info.node_ref().node.global_id();
refs.by_handle.remove(&handle);
refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
}
} else {
// All refs are cleared in process exit, so this warning is expected in that case.
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e700f40fd87f..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 5daa32c4cf25..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -117,9 +117,6 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3abd6e5400ad..f7b827b5e9b2 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -7,6 +7,7 @@
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_mask == 0)
return 0;
- hw_index = (readl(clock->reg) & clock->src_mask) >>
- __ffs(clock->src_mask);
+ hw_index = field_get(clock->src_mask, readl(clock->reg));
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- src = clock->parents[index] << __ffs(clock->src_mask);
+ src = field_prep(clock->src_mask, clock->parents[index]);
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
return 0;
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 10ae20489df9..b954278ddd9d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
unsigned int mult;
- u32 val;
- val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
- mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+ mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
return parent_rate * mult * pll_clk->fixed_mult;
}
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = readl(pll_clk->pllcr_reg);
val &= ~CPG_PLLnCR_STC_MASK;
- val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+ val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
writel(val, pll_clk->pllcr_reg);
for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index fb9a876aaba5..db3a0b8ef2b9 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
index 69295a9ddf0a..4ccc94ed9493 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -1,18 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sprintf.h>
#include <linux/string_helpers.h>
#include "adf_pm_dbgfs_utils.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
#define PM_INFO_MAX_KEY_LEN 21
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 8d4ddaa85ae8..eaab6af143e1 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -44,6 +44,7 @@
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -139,9 +140,6 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2e0ae953dd99..cbdf781994dc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
@@ -30,10 +31,6 @@
*/
#include <linux/gpio/consumer.h>
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define GPIO_G7_IRQ_STS_BASE 0x100
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
#define GPIO_G7_CTRL_REG_BASE 0x180
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index 6134613777b8..b97b46090d80 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -53,9 +53,6 @@
#define AD3530R_MAX_CHANNELS 8
#define AD3531R_MAX_CHANNELS 4
-/* Non-constant mask variant of FIELD_PREP() */
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
enum ad3530r_mode {
AD3530R_NORMAL_OP,
AD3530R_POWERDOWN_1K,
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 8a44a00bfd5e..1ad21b73e1b4 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -22,6 +22,7 @@
* the "wakeup" GPIO is not given, power management will be disabled.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -68,10 +69,6 @@
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
struct mlx_chip_info {
/* EEPROM offsets with 16-bit data, MSB first */
/* emissivity correction coefficient */
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index cdad01d68a37..8d71dc53cc1d 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -81,10 +81,6 @@
#define MVOLT_1800 0
#define MVOLT_3300 1
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
static const char * const gpio_group_name[] = {
"gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
"gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 1eb52356b996..ee4f17bb4db4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2014-2016 Glider bvba
*/
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -524,8 +525,7 @@ static int __init renesas_soc_init(void)
eshi, eslo);
}
- if (soc->id &&
- ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+ if (soc->id && field_get(id->mask, product) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
ret = -ENODEV;
goto free_soc_dev_attr;
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index 19c1e666279b..ae727d9c8cc5 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -16,8 +17,6 @@
#include "rz-sysc.h"
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
/**
* struct rz_sysc - RZ SYSC private data structure
* @base: SYSC base address
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 5355f8f806a9..126dc5b380af 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -17,6 +17,7 @@
* FIELD_{GET,PREP} macros take as first parameter shifted mask
* from which they extract the base mask and shift amount.
* Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
*
* Example:
*
@@ -60,7 +61,7 @@
#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
@@ -69,13 +70,33 @@
~((_mask) >> __bf_shf(_mask)) & \
(0 + (_val)) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
- __bf_cast_unsigned(_reg, ~0ull), \
- _pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
+#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \
+ __bf_cast_unsigned(reg, ~0ull), \
+ pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ __BF_FIELD_CHECK_REG(mask, reg, pfx); \
+ })
+
+#define __FIELD_PREP(mask, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ ((typeof(mask))(val) << __bf_shf(mask)) & (mask); \
+ })
+
+#define __FIELD_GET(mask, reg, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, 0U, pfx); \
+ (typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \
+ })
+
/**
* FIELD_MAX() - produce the maximum value representable by a field
* @_mask: shifted mask defining the field's length and position
@@ -112,8 +133,8 @@
*/
#define FIELD_PREP(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
- ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ __BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \
+ __FIELD_PREP(_mask, _val, "FIELD_PREP: "); \
})
#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
@@ -152,8 +173,8 @@
*/
#define FIELD_GET(_mask, _reg) \
({ \
- __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
- (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ __BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \
+ __FIELD_GET(_mask, _reg, "FIELD_GET: "); \
})
/**
@@ -220,4 +241,62 @@ __MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
+#define __field_prep(mask, val) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __val = (val); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__val << __shift) & __mask; \
+ })
+
+#define __field_get(mask, reg) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __reg = (reg); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__reg & __mask) >> __shift; \
+ })
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val) \
+ (__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+ : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg) \
+ (__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+ : __field_get(mask, reg))
+
#endif
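
A minimal usage sketch (not part of this patch) of the distinction documented above: FIELD_GET()/FIELD_PREP() keep their compile-time checks for constant masks, while field_get()/field_prep() accept a mask that is only known at run time, such as one picked out of a table by a variable index (the case the removed per-driver copies existed for). The register layout, mask table and function names below are made up for illustration.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE_MASK	GENMASK(5, 4)		/* compile-time constant mask */

/* Hypothetical per-field masks that are only selected at run time. */
static const u32 example_masks[] = { GENMASK(3, 0), GENMASK(11, 8) };

static u32 example_pack(u32 reg, unsigned int idx, u32 val)
{
	/* Constant mask: FIELD_PREP() retains all compile-time checks. */
	reg |= FIELD_PREP(EXAMPLE_MODE_MASK, 2);
	/* Mask indexed by a variable: field_prep() skips the constancy assert. */
	reg |= field_prep(example_masks[idx], val);
	return reg;
}

static u32 example_unpack(u32 reg, unsigned int idx)
{
	return field_get(example_masks[idx], reg);
}
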
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index afedfd5bea07..80211900f373 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -7,14 +7,16 @@
* set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-#include <linux/cleanup.h>
-#include <linux/kernel.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/cleanup.h>
#include <linux/cpumask_types.h>
-#include <linux/atomic.h>
-#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
/**
* cpumask_pr_args - printf args to output a cpumask
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 7ad1f5c7407e..bd38648c998d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,18 +245,18 @@ static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int n
}
/* FIXME: better would be to fix all architectures to never return
- > MAX_NUMNODES, then the silly min_ts could be dropped. */
+ > MAX_NUMNODES, then the silly min()s could be dropped. */
#define first_node(src) __first_node(&(src))
static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -293,8 +293,7 @@ static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
#define first_unset_node(mask) __first_unset_node(&(mask))
static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(unsigned int, MAX_NUMNODES,
- find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
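
A small illustration (not part of this patch) of the first_node()/next_node() semantics the FIXME above refers to: when no further bit is found, the helpers return MAX_NUMNODES. The node numbers are made up and assume a NUMA configuration with at least four possible nodes.

#include <linux/nodemask.h>
#include <linux/printk.h>

static void nodemask_example(void)
{
	nodemask_t mask = NODE_MASK_NONE;
	unsigned int node;

	node_set(1, mask);
	node_set(3, mask);

	node = first_node(mask);	/* 1 */
	node = next_node(node, mask);	/* 3 */
	node = next_node(node, mask);	/* MAX_NUMNODES: no bits left */

	for_each_node_mask(node, mask)	/* visits 1, then 3 */
		pr_info("node %u is set\n", node);
}
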
diff --git a/lib/hweight.c b/lib/hweight.c
index c94586b62551..0dfcafc3fd39 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -4,8 +4,8 @@
#include <asm/types.h>
/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
+ * DOC: __sw_hweightN - returns the hamming weight of a N-bit word
+ * @w: the word to weigh
*
* The Hamming Weight of a number is the total number of bits set in it.
*/
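
The __sw_hweightN() routines in this file normally sit behind the hweightN() helpers from <linux/bitops.h>. A quick sketch (not part of this patch) of the Hamming weight they compute:

#include <linux/bitops.h>
#include <linux/printk.h>

static void hweight_example(void)
{
	unsigned int a = hweight8(0xb1);			/* 1011 0001 -> 4 bits set */
	unsigned int b = hweight32(0xf0f0f0f0);			/* -> 16 */
	unsigned int c = hweight64(0x8000000000000001ULL);	/* -> 2 */

	pr_info("hamming weights: %u %u %u\n", a, b, c);
}
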
diff --git a/rust/kernel/bitmap.rs b/rust/kernel/bitmap.rs
index aa8fc7bf06fc..83d7dea99137 100644
--- a/rust/kernel/bitmap.rs
+++ b/rust/kernel/bitmap.rs
@@ -12,8 +12,6 @@ use crate::bindings;
use crate::pr_err;
use core::ptr::NonNull;
-const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
-
/// Represents a C bitmap. Wraps underlying C bitmap API.
///
/// # Invariants
@@ -149,14 +147,14 @@ macro_rules! bitmap_assert_return {
///
/// # Invariants
///
-/// * `nbits` is `<= i32::MAX` and never changes.
-/// * if `nbits <= bindings::BITS_PER_LONG`, then `repr` is a `usize`.
+/// * `nbits` is `<= MAX_LEN`.
+/// * if `nbits <= MAX_INLINE_LEN`, then `repr` is a `usize`.
/// * otherwise, `repr` holds a non-null pointer to an initialized
/// array of `unsigned long` that is large enough to hold `nbits` bits.
pub struct BitmapVec {
/// Representation of bitmap.
repr: BitmapRepr,
- /// Length of this bitmap. Must be `<= i32::MAX`.
+ /// Length of this bitmap. Must be `<= MAX_LEN`.
nbits: usize,
}
@@ -164,7 +162,7 @@ impl core::ops::Deref for BitmapVec {
type Target = Bitmap;
fn deref(&self) -> &Bitmap {
- let ptr = if self.nbits <= BITS_PER_LONG {
+ let ptr = if self.nbits <= BitmapVec::MAX_INLINE_LEN {
// SAFETY: Bitmap is represented inline.
#[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
unsafe {
@@ -183,7 +181,7 @@ impl core::ops::Deref for BitmapVec {
impl core::ops::DerefMut for BitmapVec {
fn deref_mut(&mut self) -> &mut Bitmap {
- let ptr = if self.nbits <= BITS_PER_LONG {
+ let ptr = if self.nbits <= BitmapVec::MAX_INLINE_LEN {
// SAFETY: Bitmap is represented inline.
#[allow(unused_unsafe, reason = "Safe since Rust 1.92.0")]
unsafe {
@@ -213,7 +211,7 @@ unsafe impl Sync for BitmapVec {}
impl Drop for BitmapVec {
fn drop(&mut self) {
- if self.nbits <= BITS_PER_LONG {
+ if self.nbits <= BitmapVec::MAX_INLINE_LEN {
return;
}
// SAFETY: `self.ptr` was returned by the C `bitmap_zalloc`.
@@ -226,23 +224,39 @@ impl Drop for BitmapVec {
}
impl BitmapVec {
+ /// The maximum possible length of a `BitmapVec`.
+ pub const MAX_LEN: usize = i32::MAX as usize;
+
+ /// The maximum length that uses the inline representation.
+ pub const MAX_INLINE_LEN: usize = usize::BITS as usize;
+
+ /// Construct a longest possible inline [`BitmapVec`].
+ #[inline]
+ pub fn new_inline() -> Self {
+ // INVARIANT: `nbits <= MAX_INLINE_LEN`, so an inline bitmap is the right repr.
+ BitmapVec {
+ repr: BitmapRepr { bitmap: 0 },
+ nbits: BitmapVec::MAX_INLINE_LEN,
+ }
+ }
+
/// Constructs a new [`BitmapVec`].
///
/// Fails with [`AllocError`] when the [`BitmapVec`] could not be allocated. This
- /// includes the case when `nbits` is greater than `i32::MAX`.
+ /// includes the case when `nbits` is greater than `MAX_LEN`.
#[inline]
pub fn new(nbits: usize, flags: Flags) -> Result<Self, AllocError> {
- if nbits <= BITS_PER_LONG {
+ if nbits <= BitmapVec::MAX_INLINE_LEN {
return Ok(BitmapVec {
repr: BitmapRepr { bitmap: 0 },
nbits,
});
}
- if nbits > i32::MAX.try_into().unwrap() {
+ if nbits > Self::MAX_LEN {
return Err(AllocError);
}
let nbits_u32 = u32::try_from(nbits).unwrap();
- // SAFETY: `BITS_PER_LONG < nbits` and `nbits <= i32::MAX`.
+ // SAFETY: `MAX_INLINE_LEN < nbits` and `nbits <= MAX_LEN`.
let ptr = unsafe { bindings::bitmap_zalloc(nbits_u32, flags.as_raw()) };
let ptr = NonNull::new(ptr).ok_or(AllocError)?;
// INVARIANT: `ptr` returned by C `bitmap_zalloc` and `nbits` checked.
@@ -495,9 +509,10 @@ mod tests {
#[test]
fn bitmap_borrow() {
let fake_bitmap: [usize; 2] = [0, 0];
+ let fake_bitmap_len = 2 * usize::BITS as usize;
// SAFETY: `fake_c_bitmap` is an array of expected length.
- let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), 2 * BITS_PER_LONG) };
- assert_eq!(2 * BITS_PER_LONG, b.len());
+ let b = unsafe { Bitmap::from_raw(fake_bitmap.as_ptr(), fake_bitmap_len) };
+ assert_eq!(fake_bitmap_len, b.len());
assert_eq!(None, b.next_bit(0));
}
diff --git a/rust/kernel/id_pool.rs b/rust/kernel/id_pool.rs
index a41a3404213c..384753fe0e44 100644
--- a/rust/kernel/id_pool.rs
+++ b/rust/kernel/id_pool.rs
@@ -7,8 +7,6 @@
use crate::alloc::{AllocError, Flags};
use crate::bitmap::BitmapVec;
-const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
-
/// Represents a dynamic ID pool backed by a [`BitmapVec`].
///
/// Clients acquire and release IDs from unset bits in a bitmap.
@@ -25,22 +23,22 @@ const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
/// Basic usage
///
/// ```
-/// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
-/// use kernel::id_pool::IdPool;
+/// use kernel::alloc::AllocError;
+/// use kernel::id_pool::{IdPool, UnusedId};
///
-/// let mut pool = IdPool::new(64, GFP_KERNEL)?;
+/// let mut pool = IdPool::with_capacity(64, GFP_KERNEL)?;
/// for i in 0..64 {
-/// assert_eq!(i, pool.acquire_next_id(i).ok_or(ENOSPC)?);
+/// assert_eq!(i, pool.find_unused_id(i).ok_or(ENOSPC)?.acquire());
/// }
///
/// pool.release_id(23);
-/// assert_eq!(23, pool.acquire_next_id(0).ok_or(ENOSPC)?);
+/// assert_eq!(23, pool.find_unused_id(0).ok_or(ENOSPC)?.acquire());
///
-/// assert_eq!(None, pool.acquire_next_id(0)); // time to realloc.
+/// assert!(pool.find_unused_id(0).is_none()); // time to realloc.
/// let resizer = pool.grow_request().ok_or(ENOSPC)?.realloc(GFP_KERNEL)?;
/// pool.grow(resizer);
///
-/// assert_eq!(pool.acquire_next_id(0), Some(64));
+/// assert_eq!(pool.find_unused_id(0).ok_or(ENOSPC)?.acquire(), 64);
/// # Ok::<(), Error>(())
/// ```
///
@@ -54,8 +52,8 @@ const BITS_PER_LONG: usize = bindings::BITS_PER_LONG as usize;
/// fn get_id_maybe_realloc(guarded_pool: &SpinLock<IdPool>) -> Result<usize, AllocError> {
/// let mut pool = guarded_pool.lock();
/// loop {
-/// match pool.acquire_next_id(0) {
-/// Some(index) => return Ok(index),
+/// match pool.find_unused_id(0) {
+/// Some(index) => return Ok(index.acquire()),
/// None => {
/// let alloc_request = pool.grow_request();
/// drop(pool);
@@ -97,13 +95,24 @@ impl ReallocRequest {
impl IdPool {
/// Constructs a new [`IdPool`].
///
- /// A capacity below [`BITS_PER_LONG`] is adjusted to
- /// [`BITS_PER_LONG`].
+ /// The pool will have a capacity of [`MAX_INLINE_LEN`].
+ ///
+ /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
+ #[inline]
+ pub fn new() -> Self {
+ Self {
+ map: BitmapVec::new_inline(),
+ }
+ }
+
+ /// Constructs a new [`IdPool`] with space for a specific number of bits.
+ ///
+ /// A capacity below [`MAX_INLINE_LEN`] is adjusted to [`MAX_INLINE_LEN`].
///
- /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+ /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
#[inline]
- pub fn new(num_ids: usize, flags: Flags) -> Result<Self, AllocError> {
- let num_ids = core::cmp::max(num_ids, BITS_PER_LONG);
+ pub fn with_capacity(num_ids: usize, flags: Flags) -> Result<Self, AllocError> {
+ let num_ids = usize::max(num_ids, BitmapVec::MAX_INLINE_LEN);
let map = BitmapVec::new(num_ids, flags)?;
Ok(Self { map })
}
@@ -116,28 +125,34 @@ impl IdPool {
/// Returns a [`ReallocRequest`] if the [`IdPool`] can be shrunk, [`None`] otherwise.
///
- /// The capacity of an [`IdPool`] cannot be shrunk below [`BITS_PER_LONG`].
+ /// The capacity of an [`IdPool`] cannot be shrunk below [`MAX_INLINE_LEN`].
///
- /// [`BITS_PER_LONG`]: srctree/include/asm-generic/bitsperlong.h
+ /// [`MAX_INLINE_LEN`]: BitmapVec::MAX_INLINE_LEN
///
/// # Examples
///
/// ```
- /// use kernel::alloc::{AllocError, flags::GFP_KERNEL};
- /// use kernel::id_pool::{ReallocRequest, IdPool};
+ /// use kernel::{
+ /// alloc::AllocError,
+ /// bitmap::BitmapVec,
+ /// id_pool::{
+ /// IdPool,
+ /// ReallocRequest,
+ /// },
+ /// };
///
- /// let mut pool = IdPool::new(1024, GFP_KERNEL)?;
+ /// let mut pool = IdPool::with_capacity(1024, GFP_KERNEL)?;
/// let alloc_request = pool.shrink_request().ok_or(AllocError)?;
/// let resizer = alloc_request.realloc(GFP_KERNEL)?;
/// pool.shrink(resizer);
- /// assert_eq!(pool.capacity(), kernel::bindings::BITS_PER_LONG as usize);
+ /// assert_eq!(pool.capacity(), BitmapVec::MAX_INLINE_LEN);
/// # Ok::<(), AllocError>(())
/// ```
#[inline]
pub fn shrink_request(&self) -> Option<ReallocRequest> {
let cap = self.capacity();
- // Shrinking below [`BITS_PER_LONG`] is never possible.
- if cap <= BITS_PER_LONG {
+ // Shrinking below `MAX_INLINE_LEN` is never possible.
+ if cap <= BitmapVec::MAX_INLINE_LEN {
return None;
}
// Determine if the bitmap can shrink based on the position of
@@ -146,13 +161,13 @@ impl IdPool {
// bitmap should shrink to half its current size.
let Some(bit) = self.map.last_bit() else {
return Some(ReallocRequest {
- num_ids: BITS_PER_LONG,
+ num_ids: BitmapVec::MAX_INLINE_LEN,
});
};
if bit >= (cap / 4) {
return None;
}
- let num_ids = usize::max(BITS_PER_LONG, cap / 2);
+ let num_ids = usize::max(BitmapVec::MAX_INLINE_LEN, cap / 2);
Some(ReallocRequest { num_ids })
}
@@ -177,11 +192,13 @@ impl IdPool {
/// Returns a [`ReallocRequest`] for growing this [`IdPool`], if possible.
///
- /// The capacity of an [`IdPool`] cannot be grown above [`i32::MAX`].
+ /// The capacity of an [`IdPool`] cannot be grown above [`MAX_LEN`].
+ ///
+ /// [`MAX_LEN`]: BitmapVec::MAX_LEN
#[inline]
pub fn grow_request(&self) -> Option<ReallocRequest> {
let num_ids = self.capacity() * 2;
- if num_ids > i32::MAX.try_into().unwrap() {
+ if num_ids > BitmapVec::MAX_LEN {
return None;
}
Some(ReallocRequest { num_ids })
@@ -204,18 +221,18 @@ impl IdPool {
self.map = resizer.new;
}
- /// Acquires a new ID by finding and setting the next zero bit in the
- /// bitmap.
+ /// Finds an unused ID in the bitmap.
///
/// Upon success, returns its index. Otherwise, returns [`None`]
/// to indicate that a [`Self::grow_request`] is needed.
#[inline]
- pub fn acquire_next_id(&mut self, offset: usize) -> Option<usize> {
- let next_zero_bit = self.map.next_zero_bit(offset);
- if let Some(nr) = next_zero_bit {
- self.map.set_bit(nr);
- }
- next_zero_bit
+ #[must_use]
+ pub fn find_unused_id(&mut self, offset: usize) -> Option<UnusedId<'_>> {
+ // INVARIANT: `next_zero_bit()` returns None or an integer less than `map.len()`
+ Some(UnusedId {
+ id: self.map.next_zero_bit(offset)?,
+ pool: self,
+ })
}
/// Releases an ID.
@@ -224,3 +241,55 @@ impl IdPool {
self.map.clear_bit(id);
}
}
+
+/// Represents an unused id in an [`IdPool`].
+///
+/// # Invariants
+///
+/// The value of `id` is less than `pool.map.len()`.
+pub struct UnusedId<'pool> {
+ id: usize,
+ pool: &'pool mut IdPool,
+}
+
+impl<'pool> UnusedId<'pool> {
+ /// Get the unused id as a usize.
+ ///
+ /// Be aware that the id has not yet been acquired in the pool. The
+ /// [`acquire`] method must be called to prevent others from taking the id.
+ ///
+ /// [`acquire`]: UnusedId::acquire()
+ #[inline]
+ #[must_use]
+ pub fn as_usize(&self) -> usize {
+ self.id
+ }
+
+ /// Get the unused id as a u32.
+ ///
+ /// Be aware that the id has not yet been acquired in the pool. The
+ /// [`acquire`] method must be called to prevent others from taking the id.
+ ///
+ /// [`acquire`]: UnusedId::acquire()
+ #[inline]
+ #[must_use]
+ pub fn as_u32(&self) -> u32 {
+ // CAST: By the type invariants:
+ // `self.id < pool.map.len() <= BitmapVec::MAX_LEN = i32::MAX`.
+ self.id as u32
+ }
+
+ /// Acquire the unused id.
+ #[inline]
+ pub fn acquire(self) -> usize {
+ self.pool.map.set_bit(self.id);
+ self.id
+ }
+}
+
+impl Default for IdPool {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index fe6c2cebc7f0..f7189990dd6a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3416,10 +3416,6 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
#define RME_DIGIFACE_REGISTER(reg, mask) (((reg) << 16) | (mask))
#define RME_DIGIFACE_INVERT BIT(31)
-/* Nonconst helpers */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
static int snd_rme_digiface_write_reg(struct snd_kcontrol *kcontrol, int item, u16 mask, u16 val)
{
struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);