From 8ae0e970319ac0b516d285650a744bab4ed3dd37 Mon Sep 17 00:00:00 2001
From: Jia He
Date: Sat, 28 Oct 2023 10:20:58 +0000
Subject: dma-mapping: move dma_addressing_limited() out of line

This patch moves dma_addressing_limited() out of line, serving as a
preliminary step to prevent the introduction of a new publicly accessible
low-level helper when validating whether all system RAM is mapped within
the DMA mapping range.

Suggested-by: Christoph Hellwig
Signed-off-by: Jia He
Signed-off-by: Christoph Hellwig
---
 kernel/dma/mapping.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'kernel/dma')

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index e323ca48f7f2..7789c86f7ba3 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -793,6 +793,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+			dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-- 
cgit
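
For illustration, a minimal sketch of how a driver might consult the helper
once it is exported. The foo_probe() function and its device are hypothetical;
dma_set_mask_and_coherent(), dma_addressing_limited(), and dev_warn() are the
in-tree interfaces this commit builds on:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical probe routine; everything named foo_* is illustrative. */
	static int foo_probe(struct device *dev)
	{
		int ret;

		/* Ask for 64-bit DMA; the core may grant less via bus_dma_limit. */
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;

		/*
		 * With the helper out of line, a driver can check whether the
		 * effective mask covers all system RAM without reaching for the
		 * low-level dma_get_required_mask() machinery itself.
		 */
		if (dma_addressing_limited(dev))
			dev_warn(dev, "DMA mask too small, bounce buffering likely\n");

		return 0;
	}

The helper takes the smaller of the device's DMA mask and any bus-imposed
limit, and compares it against the mask required to reach the highest RAM
address; a smaller value means some memory is unreachable by DMA and the
core may have to bounce buffer.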