94 files changed, 810 insertions, 550 deletions
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 7d90964abbb0..710ce1c701bf 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -237,7 +237,7 @@ The usage pattern is::
       ret = hmm_range_snapshot(&range);
       if (ret) {
           up_read(&mm->mmap_sem);
-          if (ret == -EAGAIN) {
+          if (ret == -EBUSY) {
             /*
              * No need to check hmm_range_wait_until_valid() return value
              * on retry we will get proper error with hmm_range_snapshot()
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7e0486ad1318..dba9355e2484 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-    return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
+    if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+        return &arm_dma_ops;
+    return NULL;
 }
 
 #ifdef __arch_page_to_dma
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 820b60a50125..c54cd7ed90ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -663,6 +663,11 @@ config ARM_LPAE
     depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
         !CPU_32v4 && !CPU_32v3
     select PHYS_ADDR_T_64BIT
+    select SWIOTLB
+    select ARCH_HAS_DMA_COHERENT_TO_PFN
+    select ARCH_HAS_DMA_MMAP_PGPROT
+    select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+    select ARCH_HAS_SYNC_DMA_FOR_CPU
     help
       Say Y if you have an ARMv7 processor supporting the LPAE page
       table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4789c60a86e3..6774b03aa405 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
 #include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
+    /*
+     * When CONFIG_ARM_LPAE is set, physical address can extend above
+     * 32-bits, which then can't be addressed by devices that only support
+     * 32-bit DMA.
+     * Use the generic dma-direct / swiotlb ops code in that case, as that
+     * handles bounce buffering for us.
+     *
+     * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
+     * latter is also selected by the Xen code, but that code for now relies
+     * on non-NULL dev_dma_ops.  To be cleaned up later.
+     */
+    if (IS_ENABLED(CONFIG_ARM_LPAE))
+        return NULL;
     return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
 
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
     const struct dma_map_ops *dma_ops;
 
     dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+    dev->dma_coherent = coherent;
+#endif
 
     /*
      * Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev)
     /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
     set_dma_ops(dev, NULL);
 }
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+        size_t size, enum dma_data_direction dir)
+{
+    __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                  size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+        size_t size, enum dma_data_direction dir)
+{
+    __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                  size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+        dma_addr_t dma_addr)
+{
+    return dma_to_pfn(dev, dma_addr);
+}
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+        unsigned long attrs)
+{
+    if (!dev_is_dma_coherent(dev))
+        return __get_dma_pgprot(attrs, prot);
+    return prot;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+        gfp_t gfp, unsigned long attrs)
+{
+    return __dma_alloc(dev, size, dma_handle, gfp,
+               __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+               attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+        dma_addr_t dma_handle, unsigned long attrs)
+{
+    __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4920a206dce9..16d373d587c4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -21,6 +21,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
 #include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -463,6 +464,10 @@ static void __init free_highpages(void)
  */
 void __init mem_init(void)
 {
+#ifdef CONFIG_ARM_LPAE
+    swiotlb_init(1);
+#endif
+
     set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
     /* this will put all unused low memory onto the freelists */
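Taken together, the ARM hunks above mean that on an LPAE kernel an ordinary streaming mapping transparently falls back to a swiotlb bounce buffer when the backing page lies above a 32-bit-only device's DMA mask. A minimal driver-side sketch of that behaviour (hypothetical demo_map() helper, error paths trimmed):

    #include <linux/dma-mapping.h>

    /* dev: a device limited to 32-bit DMA on an LPAE (>4 GiB) system */
    static int demo_map(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t handle;

        /*
         * arm_get_dma_map_ops() now returns NULL under LPAE, so this
         * call goes through dma-direct, which bounces via swiotlb
         * whenever the buffer's physical address exceeds dev->dma_mask.
         */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
            return -ENOMEM;

        /* ... kick off DMA using 'handle' ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
    }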
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
 
     BT_DBG("hu %p", hu);
 
+    if (!hci_uart_has_flow_control(hu))
+        return -EOPNOTSUPP;
+
     ath = kzalloc(sizeof(*ath), GFP_KERNEL);
     if (!ath)
         return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
 
     bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+    if (!hci_uart_has_flow_control(hu))
+        return -EOPNOTSUPP;
+
     bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
     if (!bcm)
         return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
 
     BT_DBG("hu %p", hu);
 
+    if (!hci_uart_has_flow_control(hu))
+        return -EOPNOTSUPP;
+
     intel = kzalloc(sizeof(*intel), GFP_KERNEL);
     if (!intel)
         return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
     return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+    /* serdev nodes check if the needed operations are present */
+    if (hu->serdev)
+        return true;
+
+    if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+        return true;
+
+    return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
 
     BT_DBG("hu %p", hu);
 
+    if (!hci_uart_has_flow_control(hu))
+        return -EOPNOTSUPP;
+
     mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
     if (!mrvl)
         return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..82a0a3691a63 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
 
     BT_DBG("hu %p qca_open", hu);
 
+    if (!hci_uart_has_flow_control(hu))
+        return -EOPNOTSUPP;
+
     qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
     if (!qca)
         return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
              unsigned int oper_speed);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
     struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
     struct ipmb_request_elem *queue_elem;
     struct ipmb_msg msg;
-    ssize_t ret;
+    ssize_t ret = 0;
 
     memset(&msg, 0, sizeof(msg));
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
             continue;
 
         div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+        if (div > GENERATED_MAX_DIV + 1)
+            div = GENERATED_MAX_DIV + 1;
 
         clk_generated_best_diff(req, parent, parent_rate, div,
                     &best_diff, &best_rate);
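The clk-generated fix above caps the computed divisor, so a request far below parent_rate no longer produces an out-of-range divider. A self-contained sketch of the arithmetic (GENERATED_MAX_DIV = 255 is an assumption inferred from the at91 generated-clock field width, not quoted from the driver):

    #include <stdio.h>

    #define GENERATED_MAX_DIV 255
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    static unsigned long best_div(unsigned long parent_rate, unsigned long req_rate)
    {
        unsigned long div = DIV_ROUND_CLOSEST(parent_rate, req_rate);

        /* mirror the kernel fix: never exceed what the register can hold */
        if (div > GENERATED_MAX_DIV + 1)
            div = GENERATED_MAX_DIV + 1;
        return div;
    }

    int main(void)
    {
        /* a 5 kHz request from a 24 MHz parent would need div = 4800 */
        printf("div = %lu\n", best_div(24000000, 5000)); /* clamped to 256 */
        return 0;
    }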
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
     FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
 };
 
+static const struct mtk_fixed_factor top_early_divs[] = {
+    FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
 static const struct mtk_fixed_factor top_divs[] = {
-    FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
-        2),
     FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
         2),
     FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
     return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 }
 
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+    int i;
+
+    top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+    for (i = 0; i < CLK_TOP_NR_CLK; i++)
+        top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+    mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+            top_clk_data);
+
+    of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+            clk_mt8183_top_init_early);
+
 static int clk_mt8183_top_probe(struct platform_device *pdev)
 {
     struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
     void __iomem *base;
-    struct clk_onecell_data *clk_data;
     struct device_node *node = pdev->dev.of_node;
 
     base = devm_ioremap_resource(&pdev->dev, res);
     if (IS_ERR(base))
         return PTR_ERR(base);
 
-    clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
     mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
-        clk_data);
+        top_clk_data);
+
+    mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+        top_clk_data);
 
-    mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+    mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
 
     mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
-        node, &mt8183_clk_lock, clk_data);
+        node, &mt8183_clk_lock, top_clk_data);
 
     mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
-        base, &mt8183_clk_lock, clk_data);
+        base, &mt8183_clk_lock, top_clk_data);
 
     mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
-        base, &mt8183_clk_lock, clk_data);
+        base, &mt8183_clk_lock, top_clk_data);
 
     mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
-        clk_data);
+        top_clk_data);
 
-    return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+    return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
 }
 
 static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
     unsigned int reg = id / 32;
     unsigned int bit = id % 32;
     u32 bitmask = BIT(bit);
-    unsigned long flags;
-    u32 value;
 
     dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
     /* Reset module */
-    spin_lock_irqsave(&priv->rmw_lock, flags);
-    value = readl(priv->base + SRCR(reg));
-    value |= bitmask;
-    writel(value, priv->base + SRCR(reg));
-    spin_unlock_irqrestore(&priv->rmw_lock, flags);
+    writel(bitmask, priv->base + SRCR(reg));
 
     /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
     udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
     unsigned int reg = id / 32;
     unsigned int bit = id % 32;
     u32 bitmask = BIT(bit);
-    unsigned long flags;
-    u32 value;
 
     dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-    spin_lock_irqsave(&priv->rmw_lock, flags);
-    value = readl(priv->base + SRCR(reg));
-    value |= bitmask;
-    writel(value, priv->base + SRCR(reg));
-    spin_unlock_irqrestore(&priv->rmw_lock, flags);
+    writel(bitmask, priv->base + SRCR(reg));
 
     return 0;
 }
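The renesas hunks drop the read-modify-write and the rmw_lock in favour of a single store. That is only sound if SRCR behaves as a write-one-to-set register (bits written as zero are unaffected), which is what the change implies; treat that register semantic as an assumption read off the diff rather than a documented fact. The two patterns side by side, as kernel-style fragments:

    /* RMW under a lock: required when the register latches plain state */
    spin_lock_irqsave(&priv->rmw_lock, flags);
    value = readl(priv->base + SRCR(reg));
    value |= BIT(bit);
    writel(value, priv->base + SRCR(reg));
    spin_unlock_irqrestore(&priv->rmw_lock, flags);

    /* Write-one-to-set register: one store, no lock, no lost updates */
    writel(BIT(bit), priv->base + SRCR(reg));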
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
     tristate "Clock support for Spreadtrum SoCs"
     depends on ARCH_SPRD || COMPILE_TEST
     default ARCH_SPRD
+    select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..f497003f119c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
     }
 
     if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-        irqflags |= IRQF_TRIGGER_RISING;
+        irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+            IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
     if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-        irqflags |= IRQF_TRIGGER_FALLING;
+        irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+            IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
     irqflags |= IRQF_ONESHOT;
 
     INIT_KFIFO(le->events);
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
     for (i = 0; i < chip->ngpio; i++) {
         struct gpio_desc *desc = &gdev->descs[i];
 
-        if (chip->get_direction && gpiochip_line_is_valid(chip, i))
-            desc->flags = !chip->get_direction(chip, i) ?
-                    (1 << FLAG_IS_OUT) : 0;
-        else
-            desc->flags = !chip->direction_input ?
-                    (1 << FLAG_IS_OUT) : 0;
+        if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+            if (!chip->get_direction(chip, i))
+                set_bit(FLAG_IS_OUT, &desc->flags);
+            else
+                clear_bit(FLAG_IS_OUT, &desc->flags);
+        } else {
+            if (!chip->direction_input)
+                set_bit(FLAG_IS_OUT, &desc->flags);
+            else
+                clear_bit(FLAG_IS_OUT, &desc->flags);
+        }
     }
 
     acpi_gpiochip_add(chip);
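The gpiolib change makes edge polarity follow the line's logical level: on an active-low line, a userspace request for a rising (logical) edge now arms a falling hardware edge. A small userspace sketch of such a request via the character-device ABI (chip path and line offset are made-up values):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/gpio.h>

    int main(void)
    {
        struct gpioevent_request req;
        struct gpioevent_data event;
        int fd = open("/dev/gpiochip0", O_RDONLY);

        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.lineoffset = 4;                             /* arbitrary line */
        req.handleflags = GPIOHANDLE_REQUEST_INPUT |
                  GPIOHANDLE_REQUEST_ACTIVE_LOW;
        req.eventflags = GPIOEVENT_REQUEST_RISING_EDGE; /* logical edge */
        strcpy(req.consumer_label, "demo");

        if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0)
            return 1;

        /* With the fix, this wakes on the falling *hardware* edge,
         * i.e. when the active-low signal becomes logically active. */
        if (read(req.fd, &event, sizeof(event)) == sizeof(event))
            printf("event id %u at %llu ns\n", event.id,
                   (unsigned long long)event.timestamp);
        return 0;
    }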
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
         adev->asic_type != CHIP_FIJI &&
         adev->asic_type != CHIP_POLARIS10 &&
         adev->asic_type != CHIP_POLARIS11 &&
-        adev->asic_type != CHIP_POLARIS12) ?
+        adev->asic_type != CHIP_POLARIS12 &&
+        adev->asic_type != CHIP_VEGAM) ?
             VI_BO_SIZE_ALIGN : 1;
 
     mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..4e4094f842e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
             return r;
         }
 
-        fence = amdgpu_ctx_get_fence(ctx, entity,
-                         deps[i].handle);
+        fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+        amdgpu_ctx_put(ctx);
+
+        if (IS_ERR(fence))
+            return PTR_ERR(fence);
+        else if (!fence)
+            continue;
 
         if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
-            struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+            struct drm_sched_fence *s_fence;
             struct dma_fence *old = fence;
 
+            s_fence = to_drm_sched_fence(fence);
             fence = dma_fence_get(&s_fence->scheduled);
             dma_fence_put(old);
         }
 
-        if (IS_ERR(fence)) {
-            r = PTR_ERR(fence);
-            amdgpu_ctx_put(ctx);
+        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+        dma_fence_put(fence);
+        if (r)
             return r;
-        } else if (fence) {
-            r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
-                          true);
-            dma_fence_put(fence);
-            amdgpu_ctx_put(ctx);
-            if (r)
-                return r;
-        }
     }
 
     return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
     thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
     bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-    data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+    data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
     if (!data)
         return -ENOMEM;
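The debugfs change swaps kmalloc_array() for kcalloc() so the 1024-entry buffer starts out zeroed; if the hardware read path fills it only partially, no stale kernel memory can reach the user copy. The distinction in isolation:

    #include <linux/slab.h>

    /* kmalloc_array(): overflow-checked sizing, contents uninitialized */
    u32 *a = kmalloc_array(1024, sizeof(*a), GFP_KERNEL);

    /* kcalloc(): same sizing rules, but zero-filled, so a short fill
     * can never leak previous kernel data back to userspace */
    u32 *b = kcalloc(1024, sizeof(*b), GFP_KERNEL);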
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); @@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index c097113c3976..0685a3388e38 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 
     /* not support power state */
     memset(state_info, 0, sizeof(struct pp_states_info));
-    state_info->nums = 0;
+    state_info->nums = 1;
+    state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 
     return 0;
 }
@@ -337,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
         *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
         *size = 4;
         break;
+    case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+        *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+        *size = 4;
+        break;
     default:
         ret = -EINVAL;
         break;
@@ -723,6 +728,12 @@ static int smu_sw_init(void *handle)
         return ret;
     }
 
+    ret = smu_register_irq_handler(smu);
+    if (ret) {
+        pr_err("Failed to register smc irq handler!\n");
+        return ret;
+    }
+
     return 0;
 }
 
@@ -732,6 +743,9 @@ static int smu_sw_fini(void *handle)
     struct smu_context *smu = &adev->smu;
     int ret;
 
+    kfree(smu->irq_source);
+    smu->irq_source = NULL;
+
     ret = smu_smc_table_sw_fini(smu);
     if (ret) {
         pr_err("Failed to sw fini smc table!\n");
@@ -1088,10 +1102,6 @@ static int smu_hw_init(void *handle)
     if (ret)
         goto failed;
 
-    ret = smu_register_irq_handler(smu);
-    if (ret)
-        goto failed;
-
     if (!smu->pm_enabled)
         adev->pm.dpm_enabled = false;
     else
@@ -1121,9 +1131,6 @@ static int smu_hw_fini(void *handle)
     kfree(table_context->overdrive_table);
     table_context->overdrive_table = NULL;
 
-    kfree(smu->irq_source);
-    smu->irq_source = NULL;
-
     ret = smu_fini_fb_allocations(smu);
     if (ret)
         return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d3373c..18e780f566fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                  void *value, int *size)
 {
+    struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
     uint32_t sclk, mclk;
     int ret = 0;
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
     case AMDGPU_PP_SENSOR_GPU_TEMP:
         *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
         break;
+    case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+        *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+        *size = 4;
+        break;
     default:
         ret = -EINVAL;
         break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
 {
+    struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
     if (bgate) {
         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                AMD_IP_BLOCK_TYPE_VCN,
                                AMD_PG_STATE_GATE);
         smum_send_msg_to_smc_with_parameter(hwmgr,
                     PPSMC_MSG_PowerDownVcn, 0);
+        smu10_data->vcn_power_gated = true;
     } else {
         smum_send_msg_to_smc_with_parameter(hwmgr,
                         PPSMC_MSG_PowerUpVcn, 0);
         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                AMD_IP_BLOCK_TYPE_VCN,
                                AMD_PG_STATE_UNGATE);
+        smu10_data->vcn_power_gated = false;
     }
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 22e46a289a16..208e6711d506 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -429,7 +429,6 @@ struct smu_table_context
     struct smu_table        *tables;
     uint32_t            table_count;
     struct smu_table        memory_pool;
-    uint16_t            software_shutdown_temp;
     uint8_t             thermal_controller_type;
     uint16_t            TDPODLimit;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 4aaad255a288..cc0a3b2256af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -23,6 +23,7 @@
 #include "pp_debug.h"
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
     int ret = 0;
-    struct smu_power_context *smu_power = &smu->smu_power;
-    struct smu_power_gate *power_gate = &smu_power->power_gate;
 
-    if (enable && power_gate->uvd_gated) {
-        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-            ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-            if (ret)
-                return ret;
-        }
-        power_gate->uvd_gated = false;
+    if (enable) {
+        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+        if (ret)
+            return ret;
     } else {
-        if (!enable && !power_gate->uvd_gated) {
-            if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-                ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-                if (ret)
-                    return ret;
-            }
-            power_gate->uvd_gated = true;
-        }
+        ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+        if (ret)
+            return ret;
     }
 
-    return 0;
+    ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+    return ret;
 }
 
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -1573,7 +1566,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
     uint32_t sclk_freq = 0, uclk_freq = 0;
     uint32_t uclk_level = 0;
 
-    switch (adev->rev_id) {
+    switch (adev->pdev->revision) {
     case 0xf0: /* XTX */
     case 0xc0:
         sclk_freq = NAVI10_PEAK_SCLK_XTX;
@@ -1620,6 +1613,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo
     return ret;
 }
 
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+                        struct smu_temperature_range *range)
+{
+    struct smu_table_context *table_context = &smu->smu_table;
+    struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+    if (!range || !powerplay_table)
+        return -EINVAL;
+
+    /* The unit is temperature */
+    range->min = 0;
+    range->max = powerplay_table->software_shutdown_temp;
+
+    return 0;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
     .tables_init = navi10_tables_init,
     .alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1657,6 +1666,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
     .get_ppfeature_status = navi10_get_ppfeature_status,
     .set_ppfeature_status = navi10_set_ppfeature_status,
     .set_performance_level = navi10_set_performance_level,
+    .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index caca9091bfcc..ac5b26228e75 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
                         struct smu_temperature_range *range)
 {
     struct amdgpu_device *adev = smu->adev;
-    int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+    int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
     uint32_t val;
 
     if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
     if (high > range->max)
         high = range->max;
 
+    low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+    high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
     if (low > high)
         return -EINVAL;
 
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
     val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
     val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
     val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
-    val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
-    val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+    val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+    val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
     val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 
     WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
     if (!smu->pm_enabled)
         return ret;
+    ret = smu_get_thermal_temperature_range(smu, &range);
+    if (ret)
+        return ret;
 
     if (smu->smu_table.thermal_controller_type) {
         ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
         return ret;
     }
 
-    adev->pm.dpm.thermal.min_temp = range.min;
-    adev->pm.dpm.thermal.max_temp = range.max;
-    adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
-    adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
-    adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
-    adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
-    adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
-    adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
-    adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+    adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 
     return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dc139a6feeb1..dd6fd1c8bf24 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
     memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
            sizeof(PPTable_t));
 
-    table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
     table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
     table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
 
@@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
     return 0;
 }
 
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
-    {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
-    { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
 static int vega20_get_thermal_temperature_range(struct smu_context *smu,
                         struct smu_temperature_range *range)
 {
-
+    struct smu_table_context *table_context = &smu->smu_table;
+    ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
     PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-    if (!range)
+    if (!range || !powerplay_table)
         return -EINVAL;
 
-    memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
-    range->max = pptable->TedgeLimit *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    range->hotspot_crit_max = pptable->ThotspotLimit *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    range->mem_crit_max = pptable->ThbmLimit *
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-    range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
-        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+    /* The unit is temperature */
+    range->min = 0;
+    range->max = powerplay_table->usSoftwareShutdownTemp;
+    range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+    range->hotspot_crit_max = pptable->ThotspotLimit;
+    range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+    range->mem_crit_max = pptable->ThbmLimit;
+    range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
 
     return 0;
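Across the three powerplay files above the convention becomes: smu_temperature_range holds plain degrees Celsius, and the conversion to the millidegree values stored in adev->pm.dpm.thermal happens once, at the boundary. A tiny sketch of that conversion (the factor of 1000 for SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is an assumption inferred from the name, not quoted from the header):

    #define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 /* assumed: mdegC per degC */

    struct demo_range { int min, max; };   /* degrees Celsius */

    /* convert once when publishing to the dpm/hwmon side (millidegrees) */
    static void demo_publish(const struct demo_range *r, int *min_mc, int *max_mc)
    {
        *min_mc = r->min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
        *max_mc = r->max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    }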
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db47aa57..e9c55d1d6c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
         case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
             if (priv->lastctx == ctx)
                 break;
+            /* fall-thru */
         case MSM_SUBMIT_CMD_BUF:
             /* copy commands into RB: */
             obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
         case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
             if (priv->lastctx == ctx)
                 break;
+            /* fall-thru */
         case MSM_SUBMIT_CMD_BUF:
             OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
             OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf01e51e..dc8ec2c94301 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
         case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
             if (priv->lastctx == ctx)
                 break;
+            /* fall-thru */
         case MSM_SUBMIT_CMD_BUF:
             OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
             OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0f3232..048c8be426f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
             /* ignore if there has not been a ctx switch: */
             if (priv->lastctx == ctx)
                 break;
+            /* fall-thru */
         case MSM_SUBMIT_CMD_BUF:
             OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
                 CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
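The /* fall-thru */ comments added in the three msm files mark the missing break as intentional, which both documents the control flow and keeps GCC's -Wimplicit-fallthrough quiet. A standalone illustration with made-up names:

    switch (cmd_type) {
    case CMD_CTX_RESTORE:
        if (ctx_unchanged)
            break;
        /* fall-thru: a changed context is submitted like a plain buffer */
    case CMD_BUF:
        emit_commands();
        break;
    }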
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555372d0..78d5fa230c16 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
     mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+    struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+    struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+    u32 count;
+
+    count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+    drm_crtc_set_max_vblank_count(crtc, count);
+
+    drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
                     struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
     }
 
     /* Restore vblank irq handling after power is enabled */
-    drm_crtc_vblank_on(crtc);
+    mdp5_crtc_vblank_on(crtc);
 
     mdp5_crtc_mode_set_nofb(crtc);
 
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
         mdp5_crtc_destroy_state(crtc, crtc->state);
 
     __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+    drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5fca6b0..fec6ef1ae3b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
     dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
     dev->driver->get_scanout_position = mdp5_get_scanoutpos;
     dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-    dev->max_vblank_count = 0xffffffff;
+    dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
     dev->vblank_disable_immediate = true;
 
     return kms;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c226156f2dea..c356f5ccf253 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
     if (!np)
         return 0;
 
-    drm_of_component_match_add(dev, matchptr, compare_of, np);
+    if (of_device_is_available(np))
+        drm_of_component_match_add(dev, matchptr, compare_of, np);
 
     of_node_put(np);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c2114c748c2f..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
     return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API.  Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+    struct device *dev = msm_obj->base.dev->dev;
+
+    if (get_dma_ops(dev)) {
+        dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+            msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+    } else {
+        dma_map_sg(dev, msm_obj->sgt->sgl,
+            msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+    }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+    struct device *dev = msm_obj->base.dev->dev;
+
+    if (get_dma_ops(dev)) {
+        dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+            msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+    } else {
+        dma_unmap_sg(dev, msm_obj->sgt->sgl,
+            msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+    }
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
          * because display controller, GPU, etc. are not coherent:
          */
         if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-            dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-                    msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+            sync_for_device(msm_obj);
     }
 
     return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
              * GPU, etc. are not coherent:
              */
             if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-                         msm_obj->sgt->nents,
-                         DMA_BIDIRECTIONAL);
+                sync_for_cpu(msm_obj);
 
             sg_free_table(msm_obj->sgt);
             kfree(msm_obj->sgt);
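The branch inside sync_for_device()/sync_for_cpu() leans on a dma-mapping detail: get_dma_ops() returns NULL when the device uses dma-direct, and a non-NULL ops table when something like an IOMMU supplies the mapping. A one-line helper phrasing of the test the code above relies on:

    #include <linux/dma-mapping.h>

    static bool uses_dma_direct(struct device *dev)
    {
        /* NULL ops => dma-direct; otherwise an ops table (e.g. an
         * iommu's) provides its own map/sync implementations */
        return !get_dma_ops(dev);
    }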
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768f1b41..126703816794 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
             drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
                          connector->display_info.bpc * 3);
 
-    if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+    if (crtc_state->mode_changed) {
         slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
                               mstc->port,
                               asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
         fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+    bool ret = hmm_range_valid(range);
+
+    hmm_range_unregister(range);
+    return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+    long ret;
+
+    range->default_flags = 0;
+    range->pfn_flags_mask = -1UL;
+
+    ret = hmm_range_register(range, mirror,
+                 range->start, range->end,
+                 PAGE_SHIFT);
+    if (ret) {
+        up_read(&range->vma->vm_mm->mmap_sem);
+        return (int)ret;
+    }
+
+    if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+        up_read(&range->vma->vm_mm->mmap_sem);
+        return -EAGAIN;
+    }
+
+    ret = hmm_range_fault(range, true);
+    if (ret <= 0) {
+        if (ret == 0)
+            ret = -EBUSY;
+        up_read(&range->vma->vm_mm->mmap_sem);
+        hmm_range_unregister(range);
+        return ret;
+    }
+    return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
         range.values = nouveau_svm_pfn_values;
         range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-        ret = hmm_vma_fault(&svmm->mirror, &range, true);
+        ret = nouveau_range_fault(&svmm->mirror, &range);
         if (ret == 0) {
             mutex_lock(&svmm->mutex);
-            if (!hmm_vma_range_done(&range)) {
+            if (!nouveau_range_done(&range)) {
                 mutex_unlock(&svmm->mutex);
                 goto again;
             }
@@ -666,8 +707,8 @@ again:
                            NULL);
             svmm->vmm->vmm.object.client->super = false;
             mutex_unlock(&svmm->mutex);
+            up_read(&svmm->mm->mmap_sem);
         }
-        up_read(&svmm->mm->mmap_sem);
 
         /* Cancel any faults in the window whose pages didn't manage
          * to keep their valid bit, or stay writeable when required.
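nouveau_range_fault() and nouveau_range_done() implement the same register / wait-until-valid / fault / re-check-under-lock pattern that the hmm.rst hunk at the top of this diff documents, including the -EBUSY convention. Condensed from the caller in nouveau_svm_fault(), the retry loop is:

    again:
        ret = nouveau_range_fault(&svmm->mirror, &range); /* errors drop mmap_sem */
        if (ret == 0) {
            mutex_lock(&svmm->mutex);
            if (!nouveau_range_done(&range)) {
                /* an invalidation raced us; fault again */
                mutex_unlock(&svmm->mutex);
                goto again;
            }
            /* ... program the GPU page tables under svmm->mutex ... */
            mutex_unlock(&svmm->mutex);
            up_read(&svmm->mm->mmap_sem); /* success path only, per the fix */
        }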
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..45d5164e9574 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
     u64 sum;
 
     port_counter = &dev->port_data[port].port_counter;
+    if (!port_counter->hstats)
+        return 0;
+
     sum = get_running_counters_hwstat_sum(dev, port, index);
     sum += port_counter->hstats->value[index];
 
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
     struct rdma_port_counter *port_counter;
     u32 port;
 
-    if (!dev->ops.alloc_hw_stats || !dev->port_data)
+    if (!dev->port_data)
         return;
 
     rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
         port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
         mutex_init(&port_counter->lock);
 
+        if (!dev->ops.alloc_hw_stats)
+            continue;
+
         port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
         if (!port_counter->hstats)
             goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
     struct rdma_port_counter *port_counter;
     u32 port;
 
-    if (!dev->ops.alloc_hw_stats)
-        return;
-
     rdma_for_each_port(dev, port) {
         port_counter = &dev->port_data[port].port_counter;
         kfree(port_counter->hstats);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
     struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
     struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
     struct bnxt_qplib_gid *gid_to_del;
+    u16 vlan_id = 0xFFFF;
 
     /* Delete the entry from the hardware */
     ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
     if (sgid_tbl && sgid_tbl->active) {
         if (ctx->idx >= sgid_tbl->max)
             return -EINVAL;
-        gid_to_del = &sgid_tbl->tbl[ctx->idx];
+        gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+        vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
         /* DEL_GID is called in WQ context(netdevice_event_work_handler)
          * or via the ib_unregister_device path. In the former case QP1
          * may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
         }
         ctx->refcnt--;
         if (!ctx->refcnt) {
-            rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+            rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+                         vlan_id, true);
             if (rc) {
                 dev_err(rdev_to_dev(rdev),
                     "Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                      struct bnxt_qplib_sgid_tbl *sgid_tbl,
                      u16 max)
 {
-    sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+    sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
     if (!sgid_tbl->tbl)
         return -ENOMEM;
 
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
     for (i = 0; i < sgid_tbl->max; i++) {
         if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                sizeof(bnxt_qplib_gid_zero)))
-            bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+            bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+                        sgid_tbl->tbl[i].vlan_id, true);
     }
-    memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+    memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
     memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
     memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
     sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                      struct net_device *netdev)
 {
-    memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+    u32 i;
+
+    for (i = 0; i < sgid_tbl->max; i++)
+        sgid_tbl->tbl[i].vlan_id = 0xffff;
+
     memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
 };
 
 struct bnxt_qplib_sgid_tbl {
-    struct bnxt_qplib_gid       *tbl;
+    struct bnxt_qplib_gid_info  *tbl;
     u16             *hw_id;
     u16             max;
     u16             active;
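The bnxt_re series widens each SGID table entry from a bare GID to a (GID, VLAN) pair so identical GIDs on different VLANs stay distinguishable. A self-contained sketch of the lookup rule, with the types reduced to their essentials:

    #include <stdint.h>
    #include <string.h>

    struct gid { uint8_t data[16]; };
    struct gid_info { struct gid gid; uint16_t vlan_id; };

    /* match only when BOTH the 128-bit GID and the VLAN agree */
    static int find_entry(const struct gid_info *tbl, int max,
                  const struct gid *gid, uint16_t vlan_id)
    {
        int i;

        for (i = 0; i < max; i++)
            if (!memcmp(&tbl[i].gid, gid, sizeof(*gid)) &&
                tbl[i].vlan_id == vlan_id)
                return i;
        return -1; /* 0xFFFF (no VLAN) entries never match a tagged GID */
    }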
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
             index, sgid_tbl->max);
         return -EINVAL;
     }
-    memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+    memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
     return 0;
 }
 
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-            struct bnxt_qplib_gid *gid, bool update)
+            struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
 {
     struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                            struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
         return -ENOMEM;
     }
     for (index = 0; index < sgid_tbl->max; index++) {
-        if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+        if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+            vlan_id == sgid_tbl->tbl[index].vlan_id)
             break;
     }
     if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
         if (rc)
             return rc;
     }
-    memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+    memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
            sizeof(bnxt_qplib_gid_zero));
+    sgid_tbl->tbl[index].vlan_id = 0xFFFF;
     sgid_tbl->vlan[index] = 0;
     sgid_tbl->active--;
     dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
     }
     free_idx = sgid_tbl->max;
     for (i = 0; i < sgid_tbl->max; i++) {
-        if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+        if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+            sgid_tbl->tbl[i].vlan_id == vlan_id) {
             dev_dbg(&res->pdev->dev,
                 "SGID entry already exist in entry %d!\n", i);
             *index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
     }
     /* Add GID to the sgid_tbl */
     memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+    sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
     sgid_tbl->active++;
     if (vlan_id != 0xFFFF)
         sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
     u8              data[16];
 };
 
+struct bnxt_qplib_gid_info {
+    struct bnxt_qplib_gid gid;
+    u16 vlan_id;
+};
+
 struct bnxt_qplib_ah {
     struct bnxt_qplib_gid       dgid;
     struct bnxt_qplib_pd        *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
             struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
             struct bnxt_qplib_gid *gid);
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-            struct bnxt_qplib_gid *gid, bool update);
+            struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
             struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
             bool update, u32 *index);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
         clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }
 
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
     struct rsm_map_table *rmt;
     u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
     write_csr(dd, RCV_ERR_MASK, ~0ull);
 
     rmt = alloc_rsm_map_table(dd);
+    if (!rmt)
+        return -ENOMEM;
+
     /* set up QOS, including the QPN map table */
     init_qos(dd, rmt);
     init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
     val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
         RCV_BYPASS_HDR_SIZE_SHIFT);
     write_csr(dd, RCV_BYPASS, val);
+    return 0;
 }
 
 static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
         goto bail_cleanup;
 
     /* set initial RXE CSRs */
-    init_rxe(dd);
+    ret = init_rxe(dd);
+    if (ret)
+        goto bail_cleanup;
+
     /* set initial TXE CSRs */
     init_txe(dd);
     /* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
             cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
             break;
         trdma_clean_swqe(qp, wqe);
-        rvt_qp_wqe_unreserve(qp, wqe);
         trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
         rvt_qp_complete_swqe(qp, wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
     if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
         cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
         trdma_clean_swqe(qp, wqe);
-        rvt_qp_wqe_unreserve(qp, wqe);
         trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
         rvt_qp_complete_swqe(qp, wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..996fc298207e 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
         flows[i].req = req;
         flows[i].npagesets = 0;
         flows[i].pagesets[0].mapped = 0;
+        flows[i].resync_npkts = 0;
     }
     req->flows = flows;
     return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
     return NULL;
 }
 
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
-           u32 psn, u16 *fidx)
-{
-    for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
-          tail = CIRC_NEXT(tail, MAX_FLOWS)) {
-        struct tid_rdma_flow *flow = &req->flows[tail];
-        u32 spsn, lpsn;
-
-        spsn = full_flow_psn(flow, flow->flow_state.spsn);
-        lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
-        if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
-            if (fidx)
-                *fidx = tail;
-            return flow;
-        }
-    }
-    return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
-                       u32 psn, u16 *fidx)
-{
-    return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
-                  fidx);
-}
-
 /* TID RDMA READ functions */
 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
                     struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
              * to prevent continuous Flow Sequence errors for any
              * packets that could be still in the fabric.
              */
-            flow = find_flow(req, psn, NULL);
-            if (!flow) {
-                /*
-                 * We can't find the IB PSN matching the
-                 * received KDETH PSN. The only thing we can
-                 * do at this point is report the error to
-                 * the QP.
-                 */
-                hfi1_kern_read_tid_flow_free(qp);
-                spin_unlock(&qp->s_lock);
-                rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-                return ret;
-            }
+            flow = &req->flows[req->clear_tail];
             if (priv->s_flags & HFI1_R_TID_SW_PSN) {
                 diff = cmp_psn(psn,
                            flow->flow_state.r_next_psn);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
              struct ib_udata *udata, unsigned long virt,
              struct hns_roce_db *db)
 {
+    unsigned long page_addr = virt & PAGE_MASK;
     struct hns_roce_user_db_page *page;
+    unsigned int offset;
     int ret = 0;
 
     mutex_lock(&context->page_mutex);
 
     list_for_each_entry(page, &context->page_list, list)
-        if (page->user_virt == (virt & PAGE_MASK))
+        if (page->user_virt == page_addr)
             goto found;
 
     page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
     }
 
     refcount_set(&page->refcount, 1);
-    page->user_virt = (virt & PAGE_MASK);
-    page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+    page->user_virt = page_addr;
+    page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
     if (IS_ERR(page->umem)) {
         ret = PTR_ERR(page->umem);
         kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
     list_add(&page->list, &context->page_list);
 
 found:
-    db->dma = sg_dma_address(page->umem->sg_head.sgl) +
-          (virt & ~PAGE_MASK);
-    page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
-    db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+    offset = virt - page_addr;
+    db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+    db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
     db->u.user_page = page;
     refcount_inc(&page->refcount);
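The hns_roce fix computes the doorbell's offset within its pinned page once and applies it to both the DMA and kernel addresses, instead of poking the offset into the scatterlist. The pointer arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long virt = 0x7f3a12345678UL;    /* arbitrary user VA */
        unsigned long page_addr = virt & PAGE_MASK;
        unsigned int offset = virt - page_addr;   /* byte offset in page */

        /* db->dma       = sg_dma_address(sgl) + offset;
         * db->virt_addr = sg_virt(sgl)        + offset; */
        printf("page 0x%lx + offset 0x%x\n", page_addr, offset);
        return 0;
    }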
mlx5_free_priv_descs(mr); - - if (!allocated_from_cache) + if (!allocated_from_cache) { destroy_mkey(dev, mr); + mlx5_free_priv_descs(mr); + } } static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5b642d81e617..81da82050d05 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, * overwrite the same MTTs. Concurent invalidations might race us, * but they will write 0s as well, so no difference in the end result. */ - + mutex_lock(&umem_odp->umem_mutex); for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; /* @@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + mutex_unlock(&umem_odp->umem_mutex); /* * We are now sure that the device will not access the * memory. We can safely unmap it, and mark it as dirty if @@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, w->num_sge, 0); - kfree(w); + kvfree(w); } int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, @@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (valid_req) queue_work(system_unbound_wq, &work->work); else - kfree(work); + kvfree(work); srcu_read_unlock(&dev->mr_srcu, srcu_key); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2a97619ed603..379328b2598f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); memcpy(rss_key, ucmd.rx_hash_key, len); break; } @@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, memset(umr, 0, sizeof(*umr)); - if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) - umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ - else - umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ + if (!umrwr->ignore_free_state) { + if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) + /* fail if free */ + umr->flags = MLX5_UMR_CHECK_FREE; + else + /* fail if not free */ + umr->flags = MLX5_UMR_CHECK_NOT_FREE; + } umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a7cde98e73e8..9ce8a1b925d2 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work) static void siw_cep_set_inuse(struct siw_cep *cep) { unsigned long flags; - int rv; retry: spin_lock_irqsave(&cep->lock, flags); if (cep->in_use) { spin_unlock_irqrestore(&cep->lock, flags); - rv = wait_event_interruptible(cep->waitq, !cep->in_use); + wait_event_interruptible(cep->waitq, !cep->in_use); if (signal_pending(current)) flush_signals(current); goto retry; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index f55c4e80aea4..d0f140daf659 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ 
-612,6 +612,7 @@ static __init int siw_init_module(void) if (!siw_create_tx_threads()) { pr_info("siw: Could not start any TX thread\n"); + rv = -ENOMEM; goto out_error; } /* diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..80a740df0737 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -2,7 +2,7 @@ /* * Virtio driver for the paravirtualized IOMMU * - * Copyright (C) 2018 Arm Limited + * Copyright (C) 2019 Arm Limited */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -47,7 +47,10 @@ struct viommu_dev { /* Device configuration */ struct iommu_domain_geometry geometry; u64 pgsize_bitmap; - u8 domain_bits; + u32 first_domain; + u32 last_domain; + /* Supported MAP flags */ + u32 map_flags; u32 probe_size; }; @@ -62,6 +65,7 @@ struct viommu_domain { struct viommu_dev *viommu; struct mutex mutex; /* protects viommu pointer */ unsigned int id; + u32 map_flags; spinlock_t mappings_lock; struct rb_root_cached mappings; @@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len) return -ENOENT; case VIRTIO_IOMMU_S_FAULT: return -EFAULT; + case VIRTIO_IOMMU_S_NOMEM: + return -ENOMEM; case VIRTIO_IOMMU_S_IOERR: case VIRTIO_IOMMU_S_DEVERR: default: @@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, { int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - unsigned int max_domain = viommu->domain_bits > 31 ? ~0 : - (1U << viommu->domain_bits) - 1; vdomain->viommu = viommu; + vdomain->map_flags = viommu->map_flags; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); if (ret >= 0) vdomain->id = (unsigned int)ret; @@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { int ret; - int flags; + u32 flags; struct virtio_iommu_req_map map; struct viommu_domain *vdomain = to_viommu_domain(domain); @@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | (prot & IOMMU_MMIO ? 
VIRTIO_IOMMU_MAP_F_MMIO : 0); + if (flags & ~vdomain->map_flags) + return -EINVAL; + ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); if (ret) return ret; @@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev) goto err_free_vqs; } - viommu->domain_bits = 32; + viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; + viommu->last_domain = ~0U; /* Optional features */ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, @@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev) struct virtio_iommu_config, input_range.end, &input_end); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, - struct virtio_iommu_config, domain_bits, - &viommu->domain_bits); + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.start, + &viommu->first_domain); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.end, + &viommu->last_domain); virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, struct virtio_iommu_config, probe_size, @@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev) .force_aperture = true, }; + if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) + viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; virtio_device_ready(vdev); @@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev) static unsigned int features[] = { VIRTIO_IOMMU_F_MAP_UNMAP, - VIRTIO_IOMMU_F_DOMAIN_BITS, VIRTIO_IOMMU_F_INPUT_RANGE, + VIRTIO_IOMMU_F_DOMAIN_RANGE, VIRTIO_IOMMU_F_PROBE, + VIRTIO_IOMMU_F_MMIO, }; static struct virtio_device_id id_table[] = { diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e327f80ebe70..7102e2ebc614 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -10,6 +10,7 @@ #include <linux/kthread.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> +#include <linux/backing-dev.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) goto free_tag_set; } + if (mmc_host_is_spi(host) && host->use_spi_crc) + mq->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; + mq->queue->queuedata = mq; blk_queue_rq_timeout(mq->queue, 60 * HZ); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index faaaf52a46d2..eea52e2c5a0c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * delayed. Allowing the transfer to take place * avoids races and keeps things simple. 
*/ - if ((err != -ETIMEDOUT) && - (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { + if (err != -ETIMEDOUT) { state = STATE_SENDING_DATA; continue; } diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2d736e416775..ba9a63db73da 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -73,7 +73,7 @@ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) - #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 6ee340a3fb3a..603a5d9f045a 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -624,6 +624,7 @@ err_cleanup_host: sdhci_cleanup_host(host); pm_runtime_disable: + pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 48d6f0d87583..83ed1fbf73cf 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = { }; MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); +static const struct spi_device_id olpc_xo175_ec_id_table[] = { + { "xo1.75-ec", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table); + static struct spi_driver olpc_xo175_ec_spi_driver = { .driver = { .name = "olpc-xo175-ec", diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 235c0b89f824..c510d0d72475 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), {} }; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index b0d3110ae378..e4c68efac0c2 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = { static struct gpio_keys_button apu2_keys_buttons[] = { { - .code = KEY_SETUP, + .code = KEY_RESTART, .active_low = 1, .desc = "front button", .type = EV_KEY, @@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); -MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); -MODULE_SOFTDEP("pre: platform:leds-gpio"); -MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); +MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled"); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 819296332913..42a8c2a13ab1 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -96,7 +96,7 @@ struct vhost_uaddr { }; #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 -#define VHOST_ARCH_CAN_ACCEL_UACCESS 1 +#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 #else #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 
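
The vhost.h hunk above hard-wires VHOST_ARCH_CAN_ACCEL_UACCESS to 0 in both branches of the conditional, so the metadata uaccess fast path is compiled out on every architecture regardless of CONFIG_MMU_NOTIFIER and the dcache-flush check. A minimal standalone illustration of how such a compile-time gate behaves; the macro values mirror the patch, while main() and the printed strings are purely hypothetical:

/* gate_demo.c - illustrative sketch only, not from the patch */
#include <stdio.h>

#define CONFIG_MMU_NOTIFIER 1
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0  /* was 1 before the fix */
#else
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#endif

int main(void)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
    /* consumers would build the direct-mapping fast path here */
    puts("accelerated uaccess path compiled in");
#else
    puts("accelerated uaccess path disabled on all architectures");
#endif
    return 0;
}
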
#endif @@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) static void put_unlocked_entry(struct xa_state *xas, void *entry) { /* If we were the only waiter woken, wake the next one */ - if (entry && dax_is_conflict(entry)) + if (entry && !dax_is_conflict(entry)) dax_wake_entry(xas, entry, false); } diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f8d46df8fa9e..3e58a6f697dd 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) { struct f2fs_inode_info *fi = F2FS_I(inode); - u32 oldflags; /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; - oldflags = fi->i_flags; - - if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) - if (!capable(CAP_LINUX_IMMUTABLE)) - return -EPERM; - - fi->i_flags = iflags | (oldflags & ~mask); + fi->i_flags = iflags | (fi->i_flags & ~mask); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); @@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - u32 fsflags; + struct f2fs_inode_info *fi = F2FS_I(inode); + u32 fsflags, old_fsflags; u32 iflags; int ret; @@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) inode_lock(inode); + old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); + if (ret) + goto out; + ret = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); +out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags) return iflags; } -static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) +static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) { - struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); - struct fsxattr fa; - memset(&fa, 0, sizeof(struct fsxattr)); - fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags); + simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) - fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, - fi->i_projid); - - if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) - return -EFAULT; - return 0; + fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); } -static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa) +static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) { - /* - * Project Quota ID state is only allowed to change from within the init - * namespace. Enforce that restriction only if we are trying to change - * the quota ID state. Everything else is allowed in user namespaces. 
- */ - if (current_user_ns() == &init_user_ns) - return 0; + struct inode *inode = file_inode(filp); + struct fsxattr fa; - if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid) - return -EINVAL; - - if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) { - if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT)) - return -EINVAL; - } else { - if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) - return -EINVAL; - } + f2fs_fill_fsxattr(inode, &fa); + if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) + return -EFAULT; return 0; } static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - struct fsxattr fa; + struct fsxattr fa, old_fa; u32 iflags; int err; @@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) return err; inode_lock(inode); - err = f2fs_ioctl_check_project(inode, &fa); + + f2fs_fill_fsxattr(inode, &old_fa); + err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); if (err) goto out; + err = f2fs_setflags_common(inode, iflags, f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); if (err) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6691f526fa40..8974672db78f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx, if (lfs_mode) down_write(&fio.sbi->io_order_lock); + mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), + fio.old_blkaddr, false); + if (!mpage) + goto up_out; + + fio.encrypted_page = mpage; + + /* read source block in mpage */ + if (!PageUptodate(mpage)) { + err = f2fs_submit_page_bio(&fio); + if (err) { + f2fs_put_page(mpage, 1); + goto up_out; + } + lock_page(mpage); + if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || + !PageUptodate(mpage))) { + err = -EIO; + f2fs_put_page(mpage, 1); + goto up_out; + } + } + f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); @@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx, newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - goto recover_block; - } - - mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), - fio.old_blkaddr, FGP_LOCK, GFP_NOFS); - if (mpage) { - bool updated = false; - - if (PageUptodate(mpage)) { - memcpy(page_address(fio.encrypted_page), - page_address(mpage), PAGE_SIZE); - updated = true; - } f2fs_put_page(mpage, 1); - invalidate_mapping_pages(META_MAPPING(fio.sbi), - fio.old_blkaddr, fio.old_blkaddr); - if (updated) - goto write_page; - } - - err = f2fs_submit_page_bio(&fio); - if (err) - goto put_page_out; - - /* write page */ - lock_page(fio.encrypted_page); - - if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { - err = -EIO; - goto put_page_out; - } - if (unlikely(!PageUptodate(fio.encrypted_page))) { - err = -EIO; - goto put_page_out; + goto recover_block; } -write_page: + /* write target block */ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); + memcpy(page_address(fio.encrypted_page), + page_address(mpage), PAGE_SIZE); + f2fs_put_page(mpage, 1); + invalidate_mapping_pages(META_MAPPING(fio.sbi), + fio.old_blkaddr, fio.old_blkaddr); + set_page_dirty(fio.encrypted_page); if (clear_page_dirty_for_io(fio.encrypted_page)) dec_page_count(fio.sbi, F2FS_DIRTY_META); @@ -871,11 +868,12 @@ write_page: put_page_out: f2fs_put_page(fio.encrypted_page, 1); recover_block: - if (lfs_mode) - up_write(&fio.sbi->io_order_lock); if (err) f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, true, 
true); +up_out: + if (lfs_mode) + up_write(&fio.sbi->io_order_lock); put_out: f2fs_put_dnode(&dn); out: diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 6de6cda44031..78a1b873e48a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, size_t crc_offset = 0; __u32 crc = 0; + if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) { + f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); + return -EINVAL; + } + /* Check checksum_offset and crc in superblock */ if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) { crc_offset = le32_to_cpu(raw_super->checksum_offset); @@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, offsetof(struct f2fs_super_block, crc)) { f2fs_info(sbi, "Invalid SB checksum offset: %zu", crc_offset); - return 1; + return -EFSCORRUPTED; } crc = le32_to_cpu(raw_super->crc); if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { f2fs_info(sbi, "Invalid SB checksum value: %u", crc); - return 1; + return -EFSCORRUPTED; } } - if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { - f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", - F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); - return 1; - } - /* Currently, support only 4KB page cache size */ if (F2FS_BLKSIZE != PAGE_SIZE) { f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", PAGE_SIZE); - return 1; + return -EFSCORRUPTED; } /* Currently, support only 4KB block size */ @@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (blocksize != F2FS_BLKSIZE) { f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", blocksize); - return 1; + return -EFSCORRUPTED; } /* check log blocks per segment */ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { f2fs_info(sbi, "Invalid log blocks per segment (%u)", le32_to_cpu(raw_super->log_blocks_per_seg)); - return 1; + return -EFSCORRUPTED; } /* Currently, support 512/1024/2048/4096 bytes sector size */ @@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, F2FS_MIN_LOG_SECTOR_SIZE) { f2fs_info(sbi, "Invalid log sectorsize (%u)", le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->log_sectors_per_block) + le32_to_cpu(raw_super->log_sectorsize) != @@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", le32_to_cpu(raw_super->log_sectors_per_block), le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } segment_count = le32_to_cpu(raw_super->segment_count); @@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (segment_count > F2FS_MAX_SEGMENT || segment_count < F2FS_MIN_SEGMENTS) { f2fs_info(sbi, "Invalid segment count (%u)", segment_count); - return 1; + return -EFSCORRUPTED; } if (total_sections > segment_count || @@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, segs_per_sec > segment_count || !segs_per_sec) { f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", segment_count, total_sections, segs_per_sec); - return 1; + return -EFSCORRUPTED; } if ((segment_count / segs_per_sec) < total_sections) { f2fs_info(sbi, "Small segment_count (%u < %u * %u)", segment_count, segs_per_sec, total_sections); - return 1; + return -EFSCORRUPTED; } if (segment_count > 
(le64_to_cpu(raw_super->block_count) >> 9)) { f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", segment_count, le64_to_cpu(raw_super->block_count)); - return 1; + return -EFSCORRUPTED; } if (secs_per_zone > total_sections || !secs_per_zone) { f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", secs_per_zone, total_sections); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || raw_super->hot_ext_count > F2FS_MAX_EXTENSION || @@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->extension_count), raw_super->hot_ext_count, F2FS_MAX_EXTENSION); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->cp_payload) > @@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Insane cp_payload (%u > %u)", le32_to_cpu(raw_super->cp_payload), blocks_per_seg - F2FS_CP_PACKS); - return 1; + return -EFSCORRUPTED; } /* check reserved ino info */ @@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->node_ino), le32_to_cpu(raw_super->meta_ino), le32_to_cpu(raw_super->root_ino)); - return 1; + return -EFSCORRUPTED; } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ if (sanity_check_area_boundary(sbi, bh)) - return 1; + return -EFSCORRUPTED; return 0; } @@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi, } /* sanity checking of raw super */ - if (sanity_check_raw_super(sbi, bh)) { + err = sanity_check_raw_super(sbi, bh); + if (err) { f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", block + 1); - err = -EFSCORRUPTED; brelse(bh); continue; } diff --git a/fs/super.c b/fs/super.c index 113c58f19425..5960578a4076 100644 --- a/fs/super.c +++ b/fs/super.c @@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super); bool mount_capable(struct fs_context *fc) { - struct user_namespace *user_ns = fc->global ? 
&init_user_ns - : fc->user_ns; - if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) return capable(CAP_SYS_ADMIN); else - return ns_capable(user_ns, CAP_SYS_ADMIN); + return ns_capable(fc->user_ns, CAP_SYS_ADMIN); } /** diff --git a/include/linux/clk.h b/include/linux/clk.h index 3c096c7a51dc..853a8f181394 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, /** * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data * @clks: pointer to the clk_bulk_data table of consumer * * Behaves the same as devm_clk_bulk_get() except where there is no clock diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 9ddcf50a3c59..a7f08fb0f865 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_unhinge(struct device *dev, @@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void gpiod_put_array(struct gpio_descs *descs) @@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline struct gpio_desc *__must_check @@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_put_array(struct device *dev, @@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } @@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value(unsigned int array_size, @@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size, unsigned long *value_bitmap) { 
/* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value(unsigned int array_size, @@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, @@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, @@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_to_irq(const 
struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } diff --git a/include/linux/hmm.h b/include/linux/hmm.h index b8a08b2a10ca..7ef56dc18050 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline bool hmm_vma_range_done(struct hmm_range *range) -{ - bool ret = hmm_range_valid(range); - - hmm_range_unregister(range); - return ret; -} - -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline int hmm_vma_fault(struct hmm_mirror *mirror, - struct hmm_range *range, bool block) -{ - long ret; - - /* - * With the old API the driver must set each individual entries with - * the requested flags (valid, write, ...). So here we set the mask to - * keep intact the entries provided by the driver and zero out the - * default_flags. - */ - range->default_flags = 0; - range->pfn_flags_mask = -1UL; - - ret = hmm_range_register(range, mirror, - range->start, range->end, - PAGE_SHIFT); - if (ret) - return (int)ret; - - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - /* - * The mmap_sem was taken by driver we release it here and - * returns -EAGAIN which correspond to mmap_sem have been - * drop in the old API. - */ - up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; - } - - ret = hmm_range_fault(range, block); - if (ret <= 0) { - if (ret == -EBUSY || !ret) { - /* Same as above, drop mmap_sem to match old API. */ - up_read(&range->vma->vm_mm->mmap_sem); - ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; - hmm_range_unregister(range); - return ret; - } - return 0; -} - /* Below are for HMM internal use only! Not to be used by device driver! */ static inline void hmm_mm_init(struct mm_struct *mm) { diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b2c1648f7e5d..5714fd35a83c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -814,6 +814,7 @@ struct tee_client_device_id { /** * struct wmi_device_id - WMI device identifier * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba + * @context: pointer to driver specific data */ struct wmi_device_id { const char guid_string[UUID_STRING_LEN+1]; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 0eeea520a853..e06c77d76463 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve( /** * rvt_qp_wqe_unreserve - clean reserved operation * @qp - the rvt qp - * @wqe - the send wqe + * @flags - send wqe flags * * This decrements the reserve use count. * @@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve( * the compiler does not juggle the order of the s_last * ring index and the decrementing of s_reserved_used. 
*/ -static inline void rvt_qp_wqe_unreserve( - struct rvt_qp *qp, - struct rvt_swqe *wqe) +static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags) { - if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) { + if (unlikely(flags & RVT_SEND_RESERVE_USED)) { atomic_dec(&qp->s_reserved_used); /* insure no compiler re-order up to s_last change */ smp_mb__after_atomic(); @@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp, u32 byte_len, last; int flags = wqe->wr.send_flags; + rvt_qp_wqe_unreserve(qp, flags); rvt_put_qp_swqe(qp, wqe); need_completion = diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 2212adda8f77..64e92d56c6a8 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -2,7 +2,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM dma_fence -#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_DMA_FENCE_H #include <linux/tracepoint.h> diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index f3a12566bed0..6678cf8b235b 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -3,7 +3,7 @@ #define TRACE_SYSTEM napi #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_NAPI_H_ +#define _TRACE_NAPI_H #include <linux/netdevice.h> #include <linux/tracepoint.h> @@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll, #undef NO_DEV -#endif /* _TRACE_NAPI_H_ */ +#endif /* _TRACE_NAPI_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 60d0d8bd336d..0d1a9ebf55ba 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -2,7 +2,7 @@ #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_QDISC_H_ +#define _TRACE_QDISC_H #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); -#endif /* _TRACE_QDISC_H_ */ +#endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h index 0818f6286110..971cd02d2daf 100644 --- a/include/trace/events/tegra_apb_dma.h +++ b/include/trace/events/tegra_apb_dma.h @@ -1,5 +1,5 @@ #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_TEGRA_APM_DMA_H +#define _TRACE_TEGRA_APB_DMA_H #include <linux/tracepoint.h> #include <linux/dmaengine.h> @@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr, TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) ); -#endif /* _TRACE_TEGRADMA_H */ +#endif /* _TRACE_TEGRA_APB_DMA_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index ba1b460c9944..237e36a280cb 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* - * Virtio-iommu definition v0.9 + * Virtio-iommu definition v0.12 * - * Copyright (C) 2018 Arm Ltd. + * Copyright (C) 2019 Arm Ltd. 
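
The four trace-header hunks above (dma_fence, napi, qdisc, tegra_apb_dma) all repair the same class of bug: the guard macro tested by #if !defined(), the macro actually #define'd, and the one named in the closing comment must match exactly, otherwise the guard is never really set and the header body is re-included on every inclusion. A sketch of the correct shape for such a header, using a hypothetical "sample" trace system:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H  /* must match the #if test above */

#include <linux/tracepoint.h>

/* TRACE_EVENT() definitions for the sample subsystem go here */

#endif /* _TRACE_SAMPLE_H */  /* and the closing comment too */
/* This part must be outside protection */
#include <trace/define_trace.h>
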
*/ #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H #define _UAPI_LINUX_VIRTIO_IOMMU_H @@ -11,26 +11,31 @@ /* Feature bits */ #define VIRTIO_IOMMU_F_INPUT_RANGE 0 -#define VIRTIO_IOMMU_F_DOMAIN_BITS 1 +#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1 #define VIRTIO_IOMMU_F_MAP_UNMAP 2 #define VIRTIO_IOMMU_F_BYPASS 3 #define VIRTIO_IOMMU_F_PROBE 4 +#define VIRTIO_IOMMU_F_MMIO 5 -struct virtio_iommu_range { - __u64 start; - __u64 end; +struct virtio_iommu_range_64 { + __le64 start; + __le64 end; +}; + +struct virtio_iommu_range_32 { + __le32 start; + __le32 end; }; struct virtio_iommu_config { /* Supported page sizes */ - __u64 page_size_mask; + __le64 page_size_mask; /* Supported IOVA range */ - struct virtio_iommu_range input_range; + struct virtio_iommu_range_64 input_range; /* Max domain ID size */ - __u8 domain_bits; - __u8 padding[3]; + struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ - __u32 probe_size; + __le32 probe_size; }; /* Request types */ @@ -49,6 +54,7 @@ struct virtio_iommu_config { #define VIRTIO_IOMMU_S_RANGE 0x05 #define VIRTIO_IOMMU_S_NOENT 0x06 #define VIRTIO_IOMMU_S_FAULT 0x07 +#define VIRTIO_IOMMU_S_NOMEM 0x08 struct virtio_iommu_req_head { __u8 type; @@ -78,12 +84,10 @@ struct virtio_iommu_req_detach { #define VIRTIO_IOMMU_MAP_F_READ (1 << 0) #define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1) -#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2) -#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3) +#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2) #define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \ VIRTIO_IOMMU_MAP_F_WRITE | \ - VIRTIO_IOMMU_MAP_F_EXEC | \ VIRTIO_IOMMU_MAP_F_MMIO) struct virtio_iommu_req_map { diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index bfc0c17f2a3d..2bd410f934b3 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) /* CMA can be used only in the context which permits sleeping */ if (cma && gfpflags_allow_blocking(gfp)) { - align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); - page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN); + size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); + + page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); } /* Fallback allocation of normal pages */ @@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) */ void dma_free_contiguous(struct device *dev, struct page *page, size_t size) { - if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT)) + if (!cma_release(dev_get_cma_area(dev), page, + PAGE_ALIGN(size) >> PAGE_SHIFT)) __free_pages(page, get_order(size)); } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 1f628e7ac709..b945239621d8 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, int ret; if (!dev_is_dma_coherent(dev)) { + unsigned long pfn; + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; - page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr, - dma_addr)); + /* If the PFN is not valid, we do not have a struct page */ + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; + page = pfn_to_page(pfn); } else { page = virt_to_page(cpu_addr); } @@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, if (!dev_is_dma_coherent(dev)) { if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; + + /* If the PFN is not valid, 
we do not have a struct page */ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; } else { pfn = page_to_pfn(virt_to_page(cpu_addr)); } diff --git a/kernel/exit.c b/kernel/exit.c index 4436158a6d30..5b4a5dcce8f8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -734,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead) autoreap = true; } - tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; - if (tsk->exit_state == EXIT_DEAD) + if (autoreap) { + tsk->exit_state = EXIT_DEAD; list_add(&tsk->ptrace_entry, &dead); + } /* mt-exec, de_thread() is waiting for group leader */ if (unlikely(tsk->signal->notify_count < 0)) diff --git a/kernel/signal.c b/kernel/signal.c index 91b789dd6e72..349f5a67f100 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task) { struct pid *pid; + WARN_ON(task->exit_state == 0); pid = task_pid(task); wake_up_all(&pid->wait_pidfd); } diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 69ebf3c2f1b5..78af97163147 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) return 0; + /* + * Do not trace a function if it's filtered by set_graph_notrace. + * Make the index of ret stack negative to indicate that it should + * ignore further functions. But it needs its own ret stack entry + * to recover the original index in order to continue tracing after + * returning from the function. + */ if (ftrace_graph_notrace_addr(trace->func)) { trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT); /* @@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) return 0; /* - * Do not trace a function if it's filtered by set_graph_notrace. - * Make the index of ret stack negative to indicate that it should - * ignore further functions. But it needs its own ret stack entry - * to recover the original index in order to continue tracing after - * returning from the function. - */ - if (ftrace_graph_notrace_addr(trace->func)) - return 1; - - /* * Stop here if tracing_threshold is set. We only write function return * events to the ring buffer. */ diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 83a7b614061f..798275a51887 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, * memory corruption is possible and we should stop execution. */ BUG_ON(!trylock_page(page)); - list_del(&page->lru); balloon_page_insert(b_dev_info, page); unlock_page(page); __count_vm_event(BALLOON_INFLATE); @@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, * @b_dev_info: balloon device descriptor where we will insert a new page to * @pages: pages to enqueue - allocated using balloon_page_alloc. * - * Driver must call it to properly enqueue a balloon pages before definitively - * removing it from the guest system. + * Driver must call this function to properly enqueue balloon pages before + * definitively removing them from the guest system. * * Return: number of pages that were enqueued. 
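
The balloon_compaction.c hunks here move the list_del() from balloon_page_enqueue_one() into balloon_page_list_enqueue()'s loop, so the single-page balloon_page_enqueue() no longer deletes the page from a list it was never on. A sketch of the driver-side inflate pattern this contract implies; the function name, nr_pages, and the pr_debug message are illustrative, while the balloon_* calls are the API documented above:

#include <linux/balloon_compaction.h>
#include <linux/printk.h>

static void sketch_inflate(struct balloon_dev_info *b_dev_info,
                           unsigned int nr_pages)
{
    LIST_HEAD(pages);  /* private staging list */
    struct page *page;
    unsigned int i;
    size_t enqueued;

    for (i = 0; i < nr_pages; i++) {
        page = balloon_page_alloc();
        if (!page)
            break;
        balloon_page_push(&pages, page);  /* stage on the list */
    }

    /* bulk hand-over: the core now does the list_del() per page */
    enqueued = balloon_page_list_enqueue(b_dev_info, &pages);
    pr_debug("ballooned %zu pages\n", enqueued);
}
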
*/ @@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_for_each_entry_safe(page, tmp, pages, lru) { + list_del(&page->lru); balloon_page_enqueue_one(b_dev_info, page); n_pages++; } @@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); * @n_req_pages: number of requested pages. * * Driver must call this function to properly de-allocate a previous enlisted - * balloon pages before definetively releasing it back to the guest system. + * balloon pages before definitively releasing it back to the guest system. * This function tries to remove @n_req_pages from the ballooned pages and * return them to the caller in the @pages list. * - * Note that this function may fail to dequeue some pages temporarily empty due - * to compaction isolated pages. + * Note that this function may fail to dequeue some pages even if the balloon + * isn't empty - since the page list can be temporarily empty due to compaction + * of isolated pages. * * Return: number of pages that were added to the @pages list. */ @@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue); /* * balloon_page_alloc - allocates a new page for insertion into the balloon - * page list. + * page list. + * + * Driver must call this function to properly allocate a new balloon page. + * Driver must call balloon_page_enqueue before definitively removing the page + * from the guest system. * - * Driver must call it to properly allocate a new enlisted balloon page. - * Driver must call balloon_page_enqueue before definitively removing it from - * the guest system. This function returns the page address for the recently - * allocated page or NULL in the case we fail to allocate a new page this turn. + * Return: struct page for the allocated page or NULL on allocation failure. */ struct page *balloon_page_alloc(void) { @@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void) EXPORT_SYMBOL_GPL(balloon_page_alloc); /* - * balloon_page_enqueue - allocates a new page and inserts it into the balloon - * page list. - * @b_dev_info: balloon device descriptor where we will insert a new page to + * balloon_page_enqueue - inserts a new page into the balloon page list. + * + * @b_dev_info: balloon device descriptor where we will insert a new page * @page: new page to enqueue - allocated using balloon_page_alloc. * - * Driver must call it to properly enqueue a new allocated balloon page - * before definitively removing it from the guest system. - * This function returns the page address for the recently enqueued page or - * NULL in the case we fail to allocate a new page this turn. + * Drivers must call this function to properly enqueue a new allocated balloon + * page before definitively removing the page from the guest system. + * + * Drivers must not call balloon_page_enqueue on pages that have been pushed to + * a list with balloon_page_push before removing them with balloon_page_pop. To + * enqueue a list of pages, use balloon_page_list_enqueue instead. */ void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page) @@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns - * the its address to allow the driver release the page. + * its address to allow the driver to release the page. * @b_dev_info: balloon device decriptor where we will grab a page from. 
* - * Driver must call it to properly de-allocate a previous enlisted balloon page - * before definetively releasing it back to the guest system. - * This function returns the page address for the recently dequeued page or - * NULL in the case we find balloon's page list temporarily empty due to - * compaction isolated pages. + * Driver must call this function to properly dequeue a previously enqueued page + * before definitively releasing it back to the guest system. + * + * Caller must perform its own accounting to ensure that this + * function is called only if some pages are actually enqueued. + * + * Note that this function may fail to dequeue some pages even if there are + * some enqueued pages - since the page list can be temporarily empty due to + * the compaction of isolated pages. + * + * TODO: remove the caller accounting requirements, and allow caller to wait + * until all pages can be dequeued. + * + * Return: struct page for the dequeued page, or NULL if no page was dequeued. */ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) { @@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) if (n_pages != 1) { /* * If we are unable to dequeue a balloon page because the page - * list is empty and there is no isolated pages, then something + * list is empty and there are no isolated pages, then something * went out of track and some balloon pages are lost. - * BUG() here, otherwise the balloon driver may get stuck into + * BUG() here, otherwise the balloon driver may get stuck in * an infinite loop while attempting to release all its pages. */ spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping, /* * We can not easily support the no copy case here so ignore it as it - * is unlikely to be use with ballon pages. See include/linux/hmm.h for - * user of the MIGRATE_SYNC_NO_COPY mode. + * is unlikely to be used with balloon pages. See include/linux/hmm.h + * for a user of the MIGRATE_SYNC_NO_COPY mode. */ if (mode == MIGRATE_SYNC_NO_COPY) return -EINVAL; @@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister); * @range: range * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid * permission (for instance asking for write and range is read only), - * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid + * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid * vma or it is illegal to access that range), number of valid pages * in range->pfns[] (from range start address). * @@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range) do { /* If range is no longer valid force retry. */ if (!range->valid) - return -EAGAIN; + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) @@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block) do { /* If range is no longer valid force retry. */ - if (!range->valid) { - up_read(&hmm->mm->mmap_sem); - return -EAGAIN; - } + if (!range->valid) + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) diff --git a/mm/slub.c b/mm/slub.c index e6c030e47364..8834563cdb4b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, void *old_tail = *tail ? 
*tail : *head; int rsize; - if (slab_want_init_on_free(s)) + if (slab_want_init_on_free(s)) { + void *p = NULL; + do { object = next; next = get_freepointer(s, object); @@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, : 0; memset((char *)object + s->inuse, 0, s->size - s->inuse - rsize); - set_freepointer(s, object, next); + set_freepointer(s, object, p); + p = object; } while (object != old_tail); + } /* * Compiler cannot detect this function can be removed if slab_free_hook() diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 12dd9b318db1..703857aab00f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, if (!to_check) break; /* all drained */ init_waitqueue_entry(&wait, current); + set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&to_check->sleep, &wait); snd_pcm_stream_unlock_irq(substream); if (runtime->no_period_wakeup) @@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, } tout = msecs_to_jiffies(tout * 1000); } - tout = schedule_timeout_interruptible(tout); + tout = schedule_timeout(tout); snd_pcm_stream_lock_irq(substream); group = snd_pcm_stream_group_ref(substream); diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 1192c7561d62..3c2db3816029 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus) if (!acomp) return -ENODEV; if (!acomp->ops) { - request_module("i915"); - /* 60s timeout */ - wait_for_completion_timeout(&bind_complete, - msecs_to_jiffies(60 * 1000)); + if (!IS_ENABLED(CONFIG_MODULES) || + !request_module("i915")) { + /* 60s timeout */ + wait_for_completion_timeout(&bind_complete, + msecs_to_jiffies(60 * 1000)); + } } if (!acomp->ops) { dev_info(bus->dev, "couldn't bind with audio component\n"); diff --git a/sound/usb/helper.c b/sound/usb/helper.c index 71d5f540334a..4c12cc5b53fd 100644 --- a/sound/usb/helper.c +++ b/sound/usb/helper.c @@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe) struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(dev, pipe); - if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) return -EINVAL; return 0; } diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 71231ad2dbfb..47315fe48d5a 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -262,7 +262,7 @@ test_mc_aware() stop_traffic - log_test "UC performace under MC overload" + log_test "UC performance under MC overload" echo "UC-only throughput $(humanize $ucth1)" echo "UC+MC throughput $(humanize $ucth2)" @@ -316,7 +316,7 @@ test_uc_aware() stop_traffic - log_test "MC performace under UC overload" + log_test "MC performance under UC overload" echo " ingress UC throughput $(humanize ${uc_ir})" echo " egress UC throughput $(humanize ${uc_er})" echo " sent $attempts BC ARPs, got $passes responses" diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh index 0a76314b4414..8b944cf042f6 100755 --- a/tools/testing/selftests/kmod/kmod.sh +++ b/tools/testing/selftests/kmod/kmod.sh @@ -28,7 +28,7 @@ # override by exporting to your environment prior running this script. 
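
The snd_pcm_drain() hunk above closes a lost-wakeup window: the task now marks itself TASK_INTERRUPTIBLE before it becomes visible on the wait queue, and then sleeps with plain schedule_timeout(), since schedule_timeout_interruptible() would set the task state again after an early wakeup had already made it TASK_RUNNING. The idiom in isolation, with placeholder lock and wait-queue parameters (kernel context assumed; a sketch, not the patch verbatim):

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static long sketch_wait(wait_queue_head_t *wq, spinlock_t *lock, long timeout)
{
    wait_queue_entry_t wait;

    init_waitqueue_entry(&wait, current);
    set_current_state(TASK_INTERRUPTIBLE);  /* before the queue add */
    add_wait_queue(wq, &wait);
    spin_unlock_irq(lock);

    /* plain schedule_timeout(): re-setting the task state here would
     * swallow a wakeup delivered since the unlock above */
    timeout = schedule_timeout(timeout);

    spin_lock_irq(lock);
    remove_wait_queue(wq, &wait);
    return timeout;
}
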
# For instance this script assumes you do not have xfs loaded upon boot. # If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this -# script if the filesyste module you don't have loaded upon bootup +# script if the filesystem module you don't have loaded upon bootup # is ext4 instead. Refer to allow_user_defaults() for a list of user # override variables possible. # @@ -263,7 +263,7 @@ config_get_test_result() config_reset() { if ! echo -n "1" >"$DIR"/reset; then - echo "$0: reset shuld have worked" >&2 + echo "$0: reset should have worked" >&2 exit 1 fi } @@ -488,7 +488,7 @@ usage() echo Example uses: echo echo "${TEST_NAME}.sh -- executes all tests" - echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended" + echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recommended" echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs" echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once" echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times" diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh index 30195449c63c..edcfeace4655 100644 --- a/tools/testing/selftests/livepatch/functions.sh +++ b/tools/testing/selftests/livepatch/functions.sh @@ -13,6 +13,14 @@ function log() { echo "$1" > /dev/kmsg } +# skip(msg) - testing can't proceed +# msg - explanation +function skip() { + log "SKIP: $1" + echo "SKIP: $1" >&2 + exit 4 +} + # die(msg) - game over, man # msg - dying words function die() { @@ -43,6 +51,12 @@ function loop_until() { done } +function assert_mod() { + local mod="$1" + + modprobe --dry-run "$mod" &>/dev/null +} + function is_livepatch_mod() { local mod="$1" @@ -75,6 +89,9 @@ function __load_mod() { function load_mod() { local mod="$1"; shift + assert_mod "$mod" || + skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root" + is_livepatch_mod "$mod" && die "use load_lp() to load the livepatch module $mod" @@ -88,6 +105,9 @@ function load_mod() { function load_lp_nowait() { local mod="$1"; shift + assert_mod "$mod" || + skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root" + is_livepatch_mod "$mod" || die "module $mod is not a livepatch" diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 7eaa8a3de262..b632965e60eb 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void) ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0); if (ret < 0) { - /* - * pidfd_send_signal() will currently return ENOSYS when - * CONFIG_PROC_FS is not set. 
- */ if (errno == ENOSYS) ksft_exit_skip( - "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n", + "%s test: pidfd_send_signal() syscall not supported\n", test_name); ksft_exit_fail_msg("%s test: Failed to send signal\n", diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index 4602326b8f5b..a4f4d4cf22c3 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -451,7 +451,7 @@ static int test_vsys_x(void) printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n", segv_err); } else { - printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n", + printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n", segv_err); return 1; } |
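
The pidfd_test.c hunk above drops the claim that ENOSYS specifically means CONFIG_PROC_FS=n, but keeps the probe-and-skip structure, which is worth showing on its own: kselftests probe an optional syscall once and exit with the dedicated skip code (4) instead of failing. A standalone sketch; the syscall-number fallback and the messages are assumptions, not taken from the selftest:

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424  /* generic syscall table value */
#endif

int main(void)
{
    /* signal 0 probes without delivering anything; pidfd -1 is
     * deliberately invalid, so a supporting kernel answers EBADF */
    long ret = syscall(__NR_pidfd_send_signal, -1, 0, NULL, 0);

    if (ret < 0 && errno == ENOSYS) {
        puts("SKIP: pidfd_send_signal() syscall not supported");
        return 4;  /* kselftest skip exit code */
    }
    puts("pidfd_send_signal() available; real tests would run here");
    return 0;
}
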