Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/core.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4760-cgu.c | 18
-rw-r--r--  drivers/clk/microchip/clk-mpfs-ccc.c | 10
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 26
-rw-r--r--  drivers/cxl/core/region.c | 12
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm64.c | 9
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-vf610.c | 41
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 12
-rw-r--r--  drivers/gpio/gpiolib-acpi.h | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 29
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 4
-rw-r--r--  drivers/gpu/drm/drm_client.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 14
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 10
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 9
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 3
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 8
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 44
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 108
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 12
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 1
-rw-r--r--  drivers/net/usb/kalmia.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 50
-rw-r--r--  drivers/nvdimm/Kconfig | 19
-rw-r--r--  drivers/nvdimm/nd.h | 2
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 42
-rw-r--r--  drivers/nvme/host/auth.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 20
-rw-r--r--  drivers/of/of_reserved_mem.c | 3
-rw-r--r--  drivers/pci/pci.c | 7
-rw-r--r--  drivers/pci/pci.h | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 111
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 13
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 16
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8195.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c | 2
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 9
-rw-r--r--  drivers/spi/spi-dw-core.c | 2
-rw-r--r--  drivers/spi/spi.c | 23
-rw-r--r--  drivers/spi/spidev.c | 22
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 4
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 8
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 10
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 4
-rw-r--r--  drivers/video/fbdev/nvidia/nvidia.c | 81
101 files changed, 827 insertions, 522 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f1cc5ec6a3b6..4e48d6db05eb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index ecd395ac8a28..e407f00bd594 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
- for (;; n >>= 1) {
- od = (unsigned int)-1;
+ rate /= MHZ;
+ parent_rate /= MHZ;
- do {
- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
- } while ((m > m_max || m & 1) && (od < 4));
-
- if (od < 4 && m >= 4 && m <= m_max)
- break;
+ for (m = m_max; m >= m_max && n >= 2; n--) {
+ m = rate * n / parent_rate;
+ od = m & 1;
+ m <<= od;
}
*pm = m;
- *pn = n;
+ *pn = n + 1;
*pod = 1 << od;
}
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae880a14f..0ddc73e07be4 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
- char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
- snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
- char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
- if (!name)
+ pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+ strchrnul(dev->of_node->full_name, '@'), i);
+ if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
- snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
- pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9505a812d6a1..d3f55ca06ed3 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,40 +143,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
+ unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
+ soc_data = qcom_cpufreq.soc_data;
- return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
}
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
- const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
- unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
- soc_data = qcom_cpufreq.soc_data;
- index = readl_relaxed(data->base + soc_data->reg_perf_state);
- index = min(index, LUT_MAX_ENTRIES - 1);
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return policy->freq_table[index].frequency;
+ return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -704,6 +706,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return -ENOMEM;
qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
if (!clk_data)
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 02f28da519e3..940f805b1534 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_ep *ep;
- int rc;
+ int rc = 0;
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- rc = cxld->reset(cxld);
+ if (cxld->reset)
+ rc = cxld->reset(cxld);
if (rc)
return rc;
}
@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- cxld->reset(cxld);
+ if (cxld->reset)
+ cxld->reset(cxld);
}
cxled->cxld.reset(&cxled->cxld);
@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
int i, distance;
/*
- * Passthrough ports impose no distance requirements between
+ * Passthrough decoders impose no distance requirements between
* peers
*/
- if (port->nr_dports == 1)
+ if (cxl_rr->nr_targets == 1)
distance = 0;
else
distance = p->nr_targets / cxl_rr->nr_targets;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index da4438f3188c..c4c4728a36e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
-
+ *
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..4501652e11ab 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior.
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ if (!type1_family || (
+ strcmp(type1_family, "eMAG") &&
+ strcmp(type1_family, "Altra") &&
+ strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ec7cfd4f52b1..e9917a45b005 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1531,6 +1531,7 @@ config GPIO_MLXBF2
tristate "Mellanox BlueField 2 SoC GPIO"
depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 9db42f6a2043..9033db00c360 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
- struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
static void vf610_gpio_irq_mask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
vf610_gpio_writel(0, pcr_base);
+ gpiochip_disable_irq(gc, gpio_num);
}
static void vf610_gpio_irq_unmask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
- vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
+ gpiochip_enable_irq(gc, gpio_num);
+ vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
pcr_base);
}
@@ -237,6 +240,17 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
+static const struct irq_chip vf610_irqchip = {
+ .name = "gpio-vf610",
+ .irq_ack = vf610_gpio_irq_ack,
+ .irq_mask = vf610_gpio_irq_mask,
+ .irq_unmask = vf610_gpio_irq_unmask,
+ .irq_set_type = vf610_gpio_irq_set_type,
+ .irq_set_wake = vf610_gpio_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void vf610_gpio_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -249,7 +263,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
- struct irq_chip *ic;
int i;
int ret;
@@ -315,14 +328,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ic = &port->ic;
- ic->name = "gpio-vf610";
- ic->irq_ack = vf610_gpio_irq_ack;
- ic->irq_mask = vf610_gpio_irq_mask;
- ic->irq_unmask = vf610_gpio_irq_unmask;
- ic->irq_set_type = vf610_gpio_irq_set_type;
- ic->irq_set_wake = vf610_gpio_irq_set_wake;
-
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -331,7 +336,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
vf610_gpio_writel(~0, port->base + PORT_ISFR);
girq = &gc->irq;
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &vf610_irqchip);
girq->parent_handler = vf610_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 17c53f484280..34ff048e70d0 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1637,6 +1637,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "ELAN0415:00@9",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 9475f99a9694..5a08693b8fb1 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -14,7 +14,6 @@
#include <linux/gpio/consumer.h>
-struct acpi_device;
struct device;
struct fwnode_handle;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 315cbdf61979..9abfb482b615 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug"
- default y
+ default n
+ depends on BROKEN
depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e3e2e6e3b485..d148a1bd85e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7b5ce00f0602..7af3041ccd0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
- if (!s_fence || s_fence->sched != sched)
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
continue;
+ }
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ dma_fence_put(fence);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2f28a8c02f64..fbf2f24169eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4268,6 +4268,9 @@ exit:
}
adev->in_suspend = false;
+ if (adev->enable_mes)
+ amdgpu_mes_self_test(adev);
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cd4caaa29528..3fe277bc233f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -932,6 +933,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specicfied id.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 00444203220d..faff4a3f96e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- if (!ring->no_scheduler)
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7ae7f60..3989e755a5b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -295,7 +295,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba851db..01e42bdd8e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.pid,
+ vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..5dde6f82a1ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 5dff79e8f301..1c4787000a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1344,7 +1344,7 @@ static int mes_v11_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 9eff5f41df9d..7050238c4c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -641,7 +641,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_MGCG |
- AMD_CG_SUPPORT_HDP_SD;
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 31bce529f685..9c7b69d377bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
memset(pa_config, 0, sizeof(*pa_config));
- logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
- else
- logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.vram_start >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.vram_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -9639,7 +9658,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
- drm_atomic_normalize_zpos(dev, state);
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f18b7d..8f894c1d1d1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index a9170360d7e8..2f3e239e623d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964cf73bd..4bc7aee4d44f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT))
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
- TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
+ TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b13933a98f..48a3a3952ceb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
- (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
- (1 << FEATURE_DPM_UCLK_BIT) | \
- (1 << FEATURE_DPM_FCLK_BIT) | \
- (1 << FEATURE_DPM_SOCCLK_BIT) | \
- (1 << FEATURE_DPM_MP0CLK_BIT) | \
- (1 << FEATURE_DPM_LINK_BIT) | \
- (1 << FEATURE_DPM_DCN_BIT) | \
- (1 << FEATURE_DS_GFXCLK_BIT) | \
- (1 << FEATURE_DS_SOCCLK_BIT) | \
- (1 << FEATURE_DS_FCLK_BIT) | \
- (1 << FEATURE_DS_LCLK_BIT) | \
- (1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..992163e66f7b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,11 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cf96c3f2affe..508e392547d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!range)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index e87db7e02e8a..9e1967d8049e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c7443317c747..66a4a41c3fe9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
- u64 offset = ast->vram_base;
+ u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_base + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..056ab9d5f313 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- struct drm_device *dev = buffer->client->dev;
-
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
- if (buffer->handle)
- drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
kfree(buffer);
}
static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
- buffer->handle = dumb_args.handle;
- buffer->pitch = dumb_args.pitch;
-
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
+ buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
+ *handle = dumb_args.handle;
return buffer;
@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format)
+ u32 width, u32 height, u32 format,
+ u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
- fb_req.handle = buffer->handle;
+ fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +410,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
+ u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ &handle);
if (IS_ERR(buffer))
return buffer;
- ret = drm_client_buffer_addfb(buffer, width, height, format);
+ ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+ /*
+ * The handle is only needed for creating the framebuffer, destroy it
+ * again to solve a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure are still
+ * holding references to the GEM object to prevent its destruction.
+ */
+ drm_mode_destroy_dumb(client->dev, handle, client->file);
+
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3769f3..a491e6c38875 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
dvo_port = child->dvo_port;
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7abdc09..f76c06b7f1d4 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -328,8 +328,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1be966..11554645e6ee 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
+ level == 1 && !wm->wm[level].enable &&
+ wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f266b68cf012..0f2e056c02dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3483,6 +3483,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
+ if (unlikely(eb.gem_context->syncobj)) {
+ drm_syncobj_replace_fence(eb.gem_context->syncobj,
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
+ }
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -3494,13 +3501,6 @@ err_request:
}
}
- if (unlikely(eb.gem_context->syncobj)) {
- drm_syncobj_replace_fence(eb.gem_context->syncobj,
- eb.composite_fence ?
- eb.composite_fence :
- &eb.requests[0]->fence);
- }
-
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9c759df700ca..937728840428 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 949c19339015..a0740308555d 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1355,6 +1355,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
@@ -2540,13 +2547,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_ENABLE_32_PLANE_MODE);
/*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
*/
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0108613e79d5..7258975331ca 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
- vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+ vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000;
} else {
vc4_state->hvs_load = mode->clock * 1000;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 55744216392b..7546103f1499 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,10 @@
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
@@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
- bool gcp_en;
u32 reg;
int idx;
@@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
switch (vc4_state->output_bpc) {
case 12:
gcp = 6;
- gcp_en = true;
break;
case 10:
gcp = 5;
- gcp_en = true;
break;
case 8:
default:
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
break;
}
@@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
* doesn't signal in GCP.
*/
if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
}
reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
@@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
reg = HDMI_READ(HDMI_GCP_WORD_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
+ reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
HDMI_WRITE(HDMI_GCP_WORD_1, reg);
reg = HDMI_READ(HDMI_GCP_CONFIG);
- reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
- reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
reg = HDMI_READ(HDMI_MISC_CONTROL);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 8b92a45a3c89..bd5acc4a8687 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
- for (i = 0; i < num_planes; i++)
+ for (i = 0; i < num_planes; i++) {
+ bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
+ }
/*
* We don't support subpixel source positioning for scaling,
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9f4a90493aea..da45215a933d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
void __user *user_bo_handles = NULL;
struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
uint64_t fence_ctx;
@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ring_idx = exbuf->ring_idx;
}
- exbuf->fence_fd = -1;
-
virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
- in_fence = sync_file_get_fence(in_fence_fd);
+ in_fence = sync_file_get_fence(exbuf->fence_fd);
if (!in_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aa1cd5126a32..4dcf2eb7aa80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
return -ENOMEM;
}
+ /*
+ * vmw_bo_init will delete the *p_bo object if it fails
+ */
ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin,
bo_free);
@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
return ret;
out_error:
- kfree(*p_bo);
*p_bo = NULL;
return ret;
}
@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
ttm_bo_put(&vmw_bo->base);
}
+ drm_gem_object_put(&vmw_bo->base.base);
return ret;
}
@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
+ drm_gem_object_put(&vbo->base.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* struct vmw_buffer_object should be placed.
* Return: Zero on success, Negative error code on error.
*
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
*out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base);
- drm_gem_object_put(gobj);
return 0;
}
@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
-
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index a44d53e33cdb..c0686283ffd1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ce609e7d758f..4d2c28e39f4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
-
- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
if (ret != 0)
goto out_no_bo;
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo:
return ret;
}
@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
out_no_bo:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 257f090071f1..445d619e1fdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
+ if (bo) {
vmw_bo_unreference(&bo);
+ drm_gem_object_put(&bo->base.base);
+ }
if (surface)
vmw_surface_unreference(&surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index e9f5c89b4ca6..b5b311f2a91a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
+ drm_gem_object_put(&buf->base.base);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 108a496b5d18..51e83dfa1cac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
+ drm_gem_object_put(&buffer->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3bc63ae768f3..dcfb003841b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (base->shareable && res && res->backup)
+ if (res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL;
@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
- drm_gem_object_get(&res->backup->base.base);
+ /*
+ * We don't expose the handle to the userspace and surface
+ * already holds a gem reference
+ */
+ drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
- if (user_srf->prime.base.shareable)
- drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 43b26bc12288..39357dc2d229 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
- sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
- DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+ DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
- dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
- DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f5f9269fdc16..7c5d487ec916 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
- return -EFAULT;
+ ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b02f2f0809c8..350884d5f089 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
- unsigned int npages;
+ unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tidbuf->length);
- if (!npages)
- return -EINVAL;
-
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
- tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 7b086fe63a24..195aa9ea18b6 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
continue;
idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
in_dev_for_each_ifa_rtnl(ifa, idev) {
ibdev_dbg(&iwdev->ibdev,
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ea15ec77e321..54b61930a7fd 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
- if (ucmd.port > mc->num_ports)
+ if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index c301b3be9f30..a2857accc427 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -276,8 +276,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac25fc80fb33..f10d4bcf87d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
}
priv->rn_ops = dev->netdev_ops;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index c76ba29da1e2..5adba0f754b6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
- kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
- rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 02bd3cf9a260..6e4f36aaf5db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ab5dfb3a4081..5d4b1f2ebeac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9273,10 +9273,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 11711886c3be..467001db5070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_pf *pf = vsi->back;
if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
if (frame_size > i40e_max_xdp_frame_size(vsi))
return -EINVAL;
@@ -13167,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 63acb7b96053..05f216af8c81 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -927,7 +927,7 @@ static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched
{
int status;
- if (node->tx_priority >= 8) {
+ if (priority >= 8) {
NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
return -EINVAL;
}
@@ -957,7 +957,7 @@ static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_n
{
int status;
- if (node->tx_weight > 200 || node->tx_weight < 1) {
+ if (weight > 200 || weight < 1) {
NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 71e7fa0a0bde..567694bf098b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -275,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (status && status != -EEXIST)
return status;
+ netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return 0;
}
@@ -300,6 +302,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
promisc_m, 0);
}
+ netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return status;
}
@@ -414,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
err = 0;
vlan_ops->dis_rx_filtering(vsi);
+
+ /* promiscuous mode implies allmulticast so
+ * that VSIs that are in promiscuous mode are
+ * subscribed to multicast packets coming to
+ * the port
+ */
+ err = ice_set_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err)
+ goto out_promisc;
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -430,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
+
+ /* disable allmulti here, but only if allmulti is not
+ * still enabled for the netdev
+ */
+ if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ err = ice_clear_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err) {
+ netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
+ err, vsi->vsi_num);
+ }
+ }
}
}
goto exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a25a68c69f22..b2d96ae5668c 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -598,7 +598,22 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/**
- * ice_clean_xdp_irq_zc - AF_XDP ZC specific Tx cleaning routine
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ page_frag_free(tx_buf->raw_buf);
+ xdp_ring->xdp_tx_active--;
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
*/
static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
@@ -607,44 +622,47 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
struct ice_tx_desc *tx_desc;
u16 cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
+ u16 completed_frames = 0;
u16 xsk_frames = 0;
u16 last_rs;
int i;
last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
- if (tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
if (last_rs >= ntc)
- xsk_frames = last_rs - ntc + 1;
+ completed_frames = last_rs - ntc + 1;
else
- xsk_frames = last_rs + cnt - ntc + 1;
+ completed_frames = last_rs + cnt - ntc + 1;
}
- if (!xsk_frames)
+ if (!completed_frames)
return;
- if (likely(!xdp_ring->xdp_tx_active))
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
goto skip;
+ }
ntc = xdp_ring->next_to_clean;
- for (i = 0; i < xsk_frames; i++) {
+ for (i = 0; i < completed_frames; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->xdp) {
- xsk_buff_free(tx_buf->xdp);
- xdp_ring->xdp_tx_active--;
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
} else {
xsk_frames++;
}
ntc++;
- if (ntc == cnt)
+ if (ntc >= xdp_ring->count)
ntc = 0;
}
skip:
tx_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_to_clean += xsk_frames;
+ xdp_ring->next_to_clean += completed_frames;
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
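
The rework above separates how many descriptors completed from how many AF_XDP pool buffers need releasing: slots whose tx_buf carries a driver-owned copy (raw_buf) are DMA-unmapped and freed one at a time via the new ice_clean_xdp_tx_buf(), while the remaining slots are only counted so they can be handed back to the pool afterwards. A compact userspace sketch of that split, with stand-in types instead of the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct tx_buf_sketch {
        void *raw_buf;          /* non-NULL: driver-owned copy that must be freed */
};

/* Returns how many completed slots belong to the XSK pool (released in bulk later). */
static unsigned int clean_completed(struct tx_buf_sketch *ring, unsigned int ntc,
                                    unsigned int cnt, unsigned int completed)
{
        unsigned int i, xsk_frames = 0;

        for (i = 0; i < completed; i++) {
                struct tx_buf_sketch *b = &ring[ntc];

                if (b->raw_buf) {
                        free(b->raw_buf);       /* stands in for unmap + page_frag_free */
                        b->raw_buf = NULL;
                } else {
                        xsk_frames++;           /* pool buffer, only counted here */
                }
                if (++ntc >= cnt)
                        ntc = 0;
        }
        return xsk_frames;
}

int main(void)
{
        struct tx_buf_sketch ring[4] = { { malloc(8) }, { NULL }, { malloc(8) }, { NULL } };

        printf("xsk frames to release: %u\n", clean_completed(ring, 0, 4, 4));
        return 0;
}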
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 087c950fea0b..03bc1e8af575 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
}
}
+#ifdef CONFIG_IGB_HWMON
+/**
+ * igb_set_i2c_bb - Init I2C interface
+ * @hw: pointer to hardware structure
+ **/
+static void igb_set_i2c_bb(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 i2cctl;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ wrfl();
+
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+#endif
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
* interface.
*/
if (adapter->ets)
- mac->ops.init_thermal_sensor_thresh(hw);
+ igb_set_i2c_bb(hw);
+ mac->ops.init_thermal_sensor_thresh(hw);
}
}
#endif
@@ -3141,21 +3166,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
- s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
- i2cctl = rd32(E1000_I2CPARAMS);
- i2cctl |= E1000_I2CBB_EN
- | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
- | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3544,6 +3560,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = true;
else
adapter->ets = false;
+ /* Only enable I2C bit banging if an external thermal
+ * sensor is supported.
+ */
+ if (adapter->ets)
+ igb_set_i2c_bb(hw);
+ hw->mac.ops.init_thermal_sensor_thresh(hw);
if (igb_sysfs_init(adapter))
dev_err(&pdev->dev,
"failed to allocate sysfs resources\n");
@@ -6814,7 +6836,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
struct timespec64 ts;
u32 tsauxc;
- if (pin < 0 || pin >= IGB_N_PEROUT)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
spin_lock(&adapter->tmreg_lock);
@@ -6822,7 +6844,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+ s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
u32 systiml, systimh, level_mask, level, rem;
u64 systim, now;
@@ -6870,8 +6892,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
ts.tv_nsec = (u32)systim;
ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
} else {
- ts = timespec64_add(adapter->perout[pin].start,
- adapter->perout[pin].period);
+ ts = timespec64_add(adapter->perout[tsintr_tt].start,
+ adapter->perout[tsintr_tt].period);
}
/* u32 conversion of tv_sec is safe until y2106 */
@@ -6880,7 +6902,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[pin].start = ts;
+ adapter->perout[tsintr_tt].start = ts;
spin_unlock(&adapter->tmreg_lock);
}
@@ -6894,7 +6916,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct ptp_clock_event event;
struct timespec64 ts;
- if (pin < 0 || pin >= IGB_N_EXTTS)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
if (hw->mac.type == e1000_82580 ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index bc68b8f2176d..8736ca4b2628 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -73,6 +73,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a3aaf051f9f1..773c35fecace 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6778,6 +6778,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ return IXGBE_RXBUFFER_2K;
+ else
+ return IXGBE_RXBUFFER_3K;
+}
+
+/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (adapter->xdp_prog) {
- int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
- e_warn(probe, "Requested MTU size is not supported with XDP\n");
- return -EINVAL;
- }
+ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
}
}
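
With the helper above, the MTU check no longer walks every RX ring; it compares the padded frame size against a single worst-case buffer size (2K with legacy RX or 64K pages, 3K otherwise). A small standalone sketch of that arithmetic, reusing the constants from these hunks and the usual Ethernet header/FCS/VLAN sizes purely for illustration:

#include <stdio.h>

/* Values copied from the hunks above and from common Ethernet definitions. */
#define ETH_HLEN                14
#define ETH_FCS_LEN             4
#define VLAN_HLEN               4
#define IXGBE_PKT_HDR_PAD       (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define IXGBE_RXBUFFER_2K       2048
#define IXGBE_RXBUFFER_3K       3072

/* Mirrors the check in ixgbe_change_mtu(): frame = MTU + header padding. */
static int xdp_mtu_ok(int new_mtu, int max_frame)
{
        return new_mtu + IXGBE_PKT_HDR_PAD <= max_frame;
}

int main(void)
{
        /* With 3K buffers the usual 1500-byte MTU fits; 3050 does not. */
        printf("MTU 1500 ok: %d\n", xdp_mtu_ok(1500, IXGBE_RXBUFFER_3K));
        printf("MTU 3050 ok: %d\n", xdp_mtu_ok(3050, IXGBE_RXBUFFER_3K));
        return 0;
}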
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index b8bc89fc2540..c0dcce8ae437 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -130,26 +130,31 @@ struct nfp_ipsec_cfg_mssg {
};
};
-static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
- struct nfp_ipsec_cfg_mssg *msg)
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
int i, msg_size, ret;
- msg->cmd = type;
- msg->sa_idx = saidx;
- msg->rsp = 0;
- msg_size = ARRAY_SIZE(msg->raw);
+ ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+ if (ret)
+ return ret;
+ msg_size = ARRAY_SIZE(msg->raw);
for (i = 0; i < msg_size; i++)
- nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+ nn_writel(nn, offset + 4 * i, msg->raw[i]);
- ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
- if (ret < 0)
+ ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+ if (ret < 0) {
+ nn_ctrl_bar_unlock(nn);
return ret;
+ }
/* For now we always read the whole message response back */
for (i = 0; i < msg_size; i++)
- msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+ msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+ nn_ctrl_bar_unlock(nn);
switch (msg->rsp) {
case NFP_IPSEC_CFG_MSSG_OK:
@@ -487,7 +492,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
}
/* Allocate saidx and commit the SA */
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+ msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+ msg.sa_idx = saidx;
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err) {
xa_erase(&nn->xa_ipsec, saidx);
NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");
@@ -501,14 +509,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
static void nfp_net_xfrm_del_state(struct xfrm_state *x)
{
+ struct nfp_ipsec_cfg_mssg msg = {
+ .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ .sa_idx = x->xso.offload_handle - 1,
+ };
struct net_device *netdev = x->xso.dev;
- struct nfp_ipsec_cfg_mssg msg;
struct nfp_net *nn;
int err;
nn = netdev_priv(netdev);
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
- x->xso.offload_handle - 1, &msg);
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err)
nn_warn(nn, "Failed to invalidate SA in hardware\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 432d79d691c2..939cfce15830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -617,9 +617,10 @@ struct nfp_net_dp {
* @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
* -EOPNOTSUPP to keep backwards compatibility (set by app)
* @port: Pointer to nfp_port structure if vNIC is a port
- * @mc_lock: Protect mc_addrs list
- * @mc_addrs: List of mc addrs to add/del to HW
- * @mc_work: Work to update mc addrs
+ * @mbox_amsg: Asynchronously processed message via mailbox
+ * @mbox_amsg.lock: Protect message list
+ * @mbox_amsg.list: List of message to process
+ * @mbox_amsg.work: Work to process message asynchronously
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -721,13 +722,25 @@ struct nfp_net {
struct nfp_port *port;
- spinlock_t mc_lock;
- struct list_head mc_addrs;
- struct work_struct mc_work;
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ struct work_struct work;
+ } mbox_amsg;
void *app_priv;
};
+struct nfp_mbox_amsg_entry {
+ struct list_head list;
+ int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+ u32 cmd;
+ char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e4825d885560..81b7ca0ad222 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1334,14 +1334,54 @@ err_unlock:
return err;
}
-struct nfp_mc_addr_entry {
- u8 addr[ETH_ALEN];
- u32 cmd;
- struct list_head list;
-};
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+ struct nfp_mbox_amsg_entry *entry;
+
+ entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->msg, data, len);
+ entry->cmd = cmd;
+ entry->cfg = cb;
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_add_tail(&entry->list, &nn->mbox_amsg.list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ schedule_work(&nn->mbox_amsg.work);
+
+ return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+ struct nfp_mbox_amsg_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ int err = entry->cfg(nn, entry);
+
+ if (err)
+ nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
-static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned char *addr = entry->msg;
int ret;
ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1353,26 +1393,7 @@ static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
get_unaligned_be16(addr + 4));
- return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
-}
-
-static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
-{
- struct nfp_mc_addr_entry *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
-
- ether_addr_copy(entry->addr, addr);
- entry->cmd = cmd;
- spin_lock_bh(&nn->mc_lock);
- list_add_tail(&entry->list, &nn->mc_addrs);
- spin_unlock_bh(&nn->mc_lock);
-
- schedule_work(&nn->mc_work);
-
- return 0;
+ return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}
static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
@@ -1385,35 +1406,16 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
return -EINVAL;
}
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct nfp_net *nn = netdev_priv(netdev);
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
-}
-
-static void nfp_net_mc_addr_config(struct work_struct *work)
-{
- struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
- struct nfp_mc_addr_entry *entry, *tmp;
- struct list_head tmp_list;
-
- INIT_LIST_HEAD(&tmp_list);
-
- spin_lock_bh(&nn->mc_lock);
- list_splice_init(&nn->mc_addrs, &tmp_list);
- spin_unlock_bh(&nn->mc_lock);
-
- list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
- if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
- nn_err(nn, "Config mc address to HW failed.\n");
-
- list_del(&entry->list);
- kfree(entry);
- }
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2686,9 +2688,9 @@ int nfp_net_init(struct nfp_net *nn)
if (!nn->dp.netdev)
return 0;
- spin_lock_init(&nn->mc_lock);
- INIT_LIST_HEAD(&nn->mc_addrs);
- INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+ spin_lock_init(&nn->mbox_amsg.lock);
+ INIT_LIST_HEAD(&nn->mbox_amsg.list);
+ INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
return register_netdev(nn->dp.netdev);
@@ -2709,6 +2711,6 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
- flush_work(&nn->mc_work);
+ flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
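
Taken together, the nfp changes replace the multicast-only work item with a generic queue of asynchronously processed mailbox messages: nfp_net_sched_mbox_amsg_work() copies the caller's payload into an nfp_mbox_amsg_entry and appends it under mbox_amsg.lock, and nfp_net_mbox_amsg_work() later splices the list off and runs each entry's cfg() callback. The producer/consumer shape of that in a self-contained userspace sketch, where a plain singly linked list stands in for list_head and the drain function stands in for the workqueue callback:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct amsg_entry {
        struct amsg_entry *next;
        int (*cfg)(struct amsg_entry *entry);   /* per-message handler */
        unsigned int cmd;
        char msg[];                             /* flexible payload, like the kernel struct */
};

static struct amsg_entry *queue_head, **queue_tail = &queue_head;

/* Producer: copy the payload and queue it (the kernel also schedules a work item here). */
static int sched_amsg(unsigned int cmd, const void *data, size_t len,
                      int (*cb)(struct amsg_entry *))
{
        struct amsg_entry *e = malloc(sizeof(*e) + len);

        if (!e)
                return -1;
        memcpy(e->msg, data, len);
        e->cmd = cmd;
        e->cfg = cb;
        e->next = NULL;
        *queue_tail = e;
        queue_tail = &e->next;
        return 0;
}

/* Consumer: drain the list and run each callback, like nfp_net_mbox_amsg_work(). */
static void drain_amsg(void)
{
        struct amsg_entry *e = queue_head;

        queue_head = NULL;
        queue_tail = &queue_head;
        while (e) {
                struct amsg_entry *next = e->next;

                if (e->cfg(e))
                        fprintf(stderr, "cmd %u failed\n", e->cmd);
                free(e);
                e = next;
        }
}

static int print_cfg(struct amsg_entry *e)
{
        printf("cmd %u payload \"%s\"\n", e->cmd, e->msg);
        return 0;
}

int main(void)
{
        sched_amsg(1, "add-mac", 8, print_cfg);
        sched_amsg(2, "add-sa", 7, print_cfg);
        drain_amsg();
        return 0;
}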
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index a4096050c9bd..669b9dccb6a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -403,7 +403,6 @@
*/
#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
-#define NFP_NET_CFG_MBOX_VAL 0x1808
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 413f66017219..e95d35f1e5a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
- val |= PPSCMDx(index, 0x2);
val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
@@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
/* Finally, activate it */
+ val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5429531ae7c7..067a40fe0a23 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
- if (plat->force_thresh_dma_mode) {
+ if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
plat->force_sf_dma_mode = 0;
dev_warn(&pdev->dev,
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b23027874cc4..4e3861c47708 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -501,7 +501,15 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
}
+ reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
napi_disable(&common->napi_rx);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
@@ -721,6 +729,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
+ complete(&common->tdown_complete);
return 0;
}
@@ -2736,7 +2746,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
};
static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
- .quirks = 0,
+ .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index ed26768a6e51..cad04662739c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -91,6 +91,7 @@ struct am65_cpsw_rx_chn {
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
struct am65_cpsw_pdata {
u32 quirks;
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 9f2b70ef39aa..613fc6910f14 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
- "Error sending init packet. Status %i, length %i\n",
- status, act_len);
+ "Error sending init packet. Status %i\n",
+ status);
return status;
}
else if (act_len != init_msg_len) {
@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
if (status != 0)
netdev_err(dev->net,
- "Error receiving init result. Status %i, length %i\n",
- status, act_len);
+ "Error receiving init result. Status %i\n",
+ status);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 56267c327f0b..682987040ea8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rxd->len = rbi->len;
}
-#ifdef VMXNET3_RSS
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
- (adapter->netdev->features & NETIF_F_RXHASH)) {
- enum pkt_hash_types hash_type;
-
- switch (rcd->rssType) {
- case VMXNET3_RCD_RSS_TYPE_IPV4:
- case VMXNET3_RCD_RSS_TYPE_IPV6:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
- case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
- hash_type = PKT_HASH_TYPE_L4;
- break;
- default:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- }
- skb_set_hash(ctx->skb,
- le32_to_cpu(rcd->rssHash),
- hash_type);
- }
-#endif
skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
@@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
u32 mtu = adapter->netdev->mtu;
skb->len += skb->data_len;
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
+ skb_set_hash(skb,
+ le32_to_cpu(rcd->rssHash),
+ hash_type);
+ }
+#endif
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 79d93126453d..77b06d54cc62 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -102,6 +102,25 @@ config NVDIMM_KEYS
depends on ENCRYPTED_KEYS
depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+config NVDIMM_KMSAN
+ bool
+ depends on KMSAN
+ help
+ KMSAN, and other memory debug facilities, increase the size of
+ 'struct page' to contain extra metadata. This collides with
+ the NVDIMM capability to store a potentially
+ larger-than-"System RAM" size 'struct page' array in a
+ reservation of persistent memory rather than limited /
+ precious DRAM. However, that reservation needs to persist for
+ the life of the given NVDIMM namespace. If you are using KMSAN
+ to debug an issue unrelated to NVDIMMs or DAX then say N to this
+ option. Otherwise, say Y but understand that any namespaces
+ (with the page array stored in pmem) created with this build of
+ the kernel will permanently reserve and strand excess
+ capacity compared to the CONFIG_KMSAN=n case.
+
+ Select N if unsure.
+
config NVDIMM_TEST_BUILD
tristate "Build the unit test core"
depends on m
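
The capacity the help text describes is easy to quantify: the pmem-resident page map is roughly npfns times the per-page struct size, so a namespace created under a debug kernel with an enlarged struct page strands the difference once the system reverts to a production config. A rough back-of-the-envelope sketch, using a hypothetical 1 TiB namespace, 4 KiB pages, the new 64-byte MAX_STRUCT_PAGE_SIZE, and an assumed (illustrative, not authoritative) 184-byte KMSAN-inflated struct page:

#include <stdio.h>

int main(void)
{
        unsigned long long size = 1ULL << 40;           /* hypothetical 1 TiB namespace */
        unsigned long long npfns = size / 4096;         /* 4 KiB pages */
        unsigned long long prod = npfns * 64;           /* MAX_STRUCT_PAGE_SIZE after this change */
        unsigned long long kmsan = npfns * 184;         /* assumed KMSAN-inflated struct page */

        printf("page map, production cfg: %llu MiB\n", prod >> 20);
        printf("page map, KMSAN cfg:      %llu MiB\n", kmsan >> 20);
        printf("stranded if created under KMSAN: %llu MiB\n", (kmsan - prod) >> 20);
        return 0;
}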
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85ca5b4da3cf..ec5219680092 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 128
+#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 61af072ac98f..af7d9301520c 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -13,6 +13,8 @@
#include "pfn.h"
#include "nd.h"
+static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
+
static void nd_pfn_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
}
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
+
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 128. We
- * want to make sure we use large enough size here so that
- * we don't have a dynamic reserve space depending on
- * struct page size. But we also want to make sure we notice
- * when we end up adding new elements to struct page.
+ * Also make sure size of struct page is less than
+ * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
+ * face of production kernel configurations that reduce the
+ * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
+ * kernel configurations that increase the 'struct page' size
+ * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
+ * for continuing with the capacity that will be wasted when
+ * reverting to a production kernel configuration. Otherwise,
+ * those configurations are blocked by default.
*/
- BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
- offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
- - start;
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
+ if (page_struct_override)
+ page_map_size = sizeof(struct page) * npfns;
+ else {
+ dev_err(&nd_pfn->dev,
+ "Memory debug options prevent using pmem for the page map\n");
+ return -EINVAL;
+ }
+ }
+ offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
pfn_sb->version_minor = cpu_to_le16(4);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
- pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
+ pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
+ else
+ pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index b57630d1d3b8..bdb97496ba2d 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -45,7 +45,7 @@ struct nvme_dhchap_queue_context {
int sess_key_len;
};
-struct workqueue_struct *nvme_auth_wq;
+static struct workqueue_struct *nvme_auth_wq;
#define nvme_auth_flags_from_qid(qid) \
(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c734934c407c..6d88ddd35565 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2509,18 +2509,12 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
- dma_address_bits = 48;
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
- goto disable;
-
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
goto disable;
@@ -2970,7 +2964,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
mutex_init(&dev->shutdown_lock);
@@ -2998,7 +2992,11 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
quirks);
if (ret)
goto out_put_device;
-
+
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ else
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
dma_set_max_seg_size(&pdev->dev, 0xffffffff);
@@ -3031,8 +3029,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = -ENOMEM;
dev = nvme_pci_alloc_dev(pdev, id);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
result = nvme_dev_map(dev);
if (result)
@@ -3423,6 +3421,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 65f3b02a0e4e..f90975e00446 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_phys_free(base, size);
- kmemleak_ignore_phys(base);
}
+ kmemleak_ignore_phys(base);
+
return err;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fba95486caaf..5641786bd020 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1665,7 +1665,6 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1772,7 +1771,6 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -3465,11 +3463,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9ed3b5550043..9049d07d3aae 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -566,14 +566,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 53a1fa306e1e..4b4184563a92 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
-{
- u16 l1ss = dev->l1ss;
- u32 l1_2_enable;
-
- /*
- * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
- * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
- */
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
-
- /*
- * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
- * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
- * enable bits, even though they're all in PCI_L1SS_CTL1.
- */
- l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
-
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
- if (l1_2_enable)
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
- ctl1 | l1_2_enable);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(parent, pctl1, ctl2);
-
- cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(child, cctl1, ctl2);
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
static void aspm_l1ss_init(struct pcie_link_state *link)
@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u16 l1ss = dev->l1ss;
- u32 *cap;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u32 *cap, ctl1, ctl2;
- u16 l1ss = dev->l1ss;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- ctl2 = *cap++;
- ctl1 = *cap;
- aspm_program_l1ss(dev, ctl1, ctl2);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 3945612900e6..9c6ee46ac7a0 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -93,10 +93,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
+ int ret;
+
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- return aspeed_sig_expr_set(ctx, expr, false);
+ ret = aspeed_sig_expr_eval(ctx, expr, true);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(ctx, expr, false);
+
+ return 0;
}
/**
@@ -114,7 +123,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
int ret = 0;
if (!exprs)
- return true;
+ return -EINVAL;
while (*exprs && !ret) {
ret = aspeed_sig_expr_disable(ctx, *exprs);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index cc3aaba24188..e49f271de936 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1709,6 +1709,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
#ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+ return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1742,8 +1748,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
* See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
*/
value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
- if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
- (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+ if (__intel_gpio_is_direct_irq(value))
return true;
return false;
@@ -1873,7 +1878,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- if (!intel_pinctrl_should_save(pctrl, desc->number))
+ if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+ /*
+ * If the firmware mangled the register contents too much,
+ * check the saved value for the Direct IRQ mode.
+ */
+ __intel_gpio_is_direct_irq(pads[i].padcfg0)))
continue;
intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 89557c7ed2ab..09c4dcef9338 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
- PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
- PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9bc6e3922e78..32c3edaf9038 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -365,6 +365,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
} else {
debounce_enable = " ∅";
+ time = 0;
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 99c3745da456..190923757cda 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
if (!pcs->fmask)
return 0;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
func = function->data;
if (!func)
return -EINVAL;
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index c3c8c34148f1..e22d03ce292e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
static const char * const swr_tx_clk_groups[] = { "gpio0" };
static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
static const char * const dmic1_clk_groups[] = { "gpio6" };
static const char * const dmic1_data_groups[] = { "gpio7" };
static const char * const dmic2_clk_groups[] = { "gpio8" };
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bb81b8b1f7e9..89c5374e33b3 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -408,14 +408,23 @@ static const struct intel_vsec_platform_info dg1_info = {
.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
+/* MTL info */
+static const struct intel_vsec_platform_info mtl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG,
+};
+
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 99edddf9958b..c3bfb6c84cab 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 15f174f4e056..3f33934f5429 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2220,11 +2220,26 @@ void spi_flush_queue(struct spi_controller *ctlr)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static void of_spi_parse_dt_cs_delay(struct device_node *nc,
+ struct spi_delay *delay, const char *prop)
+{
+ u32 value;
+
+ if (!of_property_read_u32(nc, prop, &value)) {
+ if (value > U16_MAX) {
+ delay->value = DIV_ROUND_UP(value, 1000);
+ delay->unit = SPI_DELAY_UNIT_USECS;
+ } else {
+ delay->value = value;
+ delay->unit = SPI_DELAY_UNIT_NSECS;
+ }
+ }
+}
+
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
- u16 cs_setup;
int rc;
/* Mode (clock phase/polarity/etc.) */
@@ -2310,10 +2325,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
- if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
- spi->cs_setup.value = cs_setup;
- spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
- }
+ /* Device CS delays */
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
return 0;
}
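
of_spi_parse_dt_cs_delay() above keeps nanosecond resolution whenever the devicetree value fits in the 16-bit delay field, and otherwise rounds up to microseconds so that values too large for a u16 are still representable. The conversion in isolation, as a small sketch with a stand-in struct rather than struct spi_delay:

#include <stdio.h>
#include <stdint.h>

struct spi_delay_sketch {
        uint16_t value;
        int unit;                       /* 0 = ns, 1 = us, mirroring the two units used above */
};

static void parse_cs_delay(uint32_t ns, struct spi_delay_sketch *d)
{
        if (ns > UINT16_MAX) {
                d->value = (ns - 1) / 1000 + 1;  /* DIV_ROUND_UP(value, 1000), overflow-safe */
                d->unit = 1;
        } else {
                d->value = ns;
                d->unit = 0;
        }
}

int main(void)
{
        struct spi_delay_sketch d;

        parse_cs_delay(50000, &d);      /* fits in 16 bits, kept in ns */
        printf("%u %s\n", d.value, d.unit ? "us" : "ns");
        parse_cs_delay(100000, &d);     /* too large for u16, stored as 100 us */
        printf("%u %s\n", d.value, d.unit ? "us" : "ns");
        return 0;
}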
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1935ca613447..a1ea093795cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -90,9 +90,21 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*-------------------------------------------------------------------------*/
static ssize_t
+spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
+{
+ ssize_t status;
+
+ status = spi_sync(spi, message);
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
- int status;
+ ssize_t status;
struct spi_device *spi;
mutex_lock(&spidev->spi_lock);
@@ -101,12 +113,10 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
if (spi == NULL)
status = -ESHUTDOWN;
else
- status = spi_sync(spi, message);
-
- if (status == 0)
- status = message->actual_length;
+ status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
+
return status;
}
@@ -294,7 +304,7 @@ static int spidev_message(struct spidev_data *spidev,
spi_message_add_tail(k_tmp, &msg);
}
- status = spidev_sync(spidev, &msg);
+ status = spidev_sync_unlocked(spidev->spi, &msg);
if (status < 0)
goto done;
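
The spidev split above factors the actual spi_sync() call into spidev_sync_unlocked(), apparently so a path that already holds the device lock (here spidev_message()) can reuse it without taking spi_lock a second time, while spidev_sync() remains the locking wrapper for everyone else. The same locked-wrapper/unlocked-worker shape in a short pthread sketch (illustrative only, not the driver code):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Does the real work; callers must already hold 'lock'. */
static long do_transfer_unlocked(const char *msg)
{
        printf("transfer: %s\n", msg);
        return 0;
}

/* Convenience wrapper for callers that do not hold the lock yet. */
static long do_transfer(const char *msg)
{
        long ret;

        pthread_mutex_lock(&lock);
        ret = do_transfer_unlocked(msg);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        do_transfer("from a path that does not hold the lock");

        pthread_mutex_lock(&lock);      /* e.g. a caller that already locked the device */
        do_transfer_unlocked("from a path that already holds the lock");
        pthread_mutex_unlock(&lock);
        return 0;
}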
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 079e183cf3bf..934b3d997702 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -526,6 +526,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+ { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 8f12f3f8f6ee..e06022873df1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -798,6 +798,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
@@ -872,6 +873,8 @@ int gether_register_netdev(struct net_device *net)
struct usb_gadget *g;
int status;
+ if (!net->dev.parent)
+ return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
@@ -902,6 +905,7 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
dev = netdev_priv(net);
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 9a6860285fbe..50b24096eb7f 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -535,10 +535,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
/* FIXME: Port can only be DFP_U. */
/* Make sure we have compatible pin configurations */
- if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
- !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+ !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index c730253ab85c..583cbcf09446 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_cleanup(struct fb_info *info)
+void fb_deferred_io_release(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page = fb_deferred_io_page(info, i);
page->mapping = NULL;
}
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+
+void fb_deferred_io_cleanup(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ fb_deferred_io_release(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 3a6c8458eb8d..ab3545a00abc 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1454,6 +1454,10 @@ __releases(&info->lock)
struct fb_info * const info = file->private_data;
lock_fb_info(info);
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+ if (info->fbdefio)
+ fb_deferred_io_release(info);
+#endif
if (info->fbops->fb_release)
info->fbops->fb_release(info, 1);
module_put(info->fbops->owner);
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 1960916098d4..e60a276b4855 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
+ volatile u32 __iomem *REGS)
{
- struct nvidia_par *par = info->par;
- u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+ u32 id = (pci_dev->vendor << 16) | pci_dev->device;
printk(KERN_INFO PFX "Device ID: %x \n", id);
if ((id & 0xfff0) == 0x00f0 ||
(id & 0xfff0) == 0x02e0) {
/* pci-e */
- id = NV_RD32(par->REGS, 0x1800);
+ id = NV_RD32(REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
id = 0x10DE0000 | (id >> 16);
@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
return id;
}
-static u32 nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(u32 Chipset)
{
- struct nvidia_par *par = info->par;
u32 arch = 0;
- switch (par->Chipset & 0x0ff0) {
+ switch (Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct fb_info *info;
unsigned short cmd;
int ret;
+ volatile u32 __iomem *REGS;
+ int Chipset;
+ u32 Architecture;
NVTRACE_ENTER();
assert(pd != NULL);
+ if (pci_enable_device(pd)) {
+ printk(KERN_ERR PFX "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /* enable IO and mem if not already done */
+ pci_read_config_word(pd, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pd, PCI_COMMAND, cmd);
+
+ nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+ nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+
+ REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+ if (!REGS) {
+ printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+ return -ENODEV;
+ }
+
+ Chipset = nvidia_get_chipset(pd, REGS);
+ Architecture = nvidia_get_arch(Chipset);
+ if (Architecture == 0) {
+ printk(KERN_ERR PFX "unknown NV_ARCH\n");
+ goto err_out;
+ }
+
ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
if (ret)
- return ret;
+ goto err_out;
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
-
if (!info)
goto err_out;
@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (info->pixmap.addr == NULL)
goto err_out_kfree;
- if (pci_enable_device(pd)) {
- printk(KERN_ERR PFX "cannot enable PCI device\n");
- goto err_out_enable;
- }
-
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_out_enable;
@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
par->paneltweak = paneltweak;
par->reverse_i2c = reverse_i2c;
- /* enable IO and mem if not already done */
- pci_read_config_word(pd, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pd, PCI_COMMAND, cmd);
-
- nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
- nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
-
- par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
- if (!par->REGS) {
- printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
- goto err_out_free_base0;
- }
+ par->REGS = REGS;
- par->Chipset = nvidia_get_chipset(info);
- par->Architecture = nvidia_get_arch(info);
-
- if (par->Architecture == 0) {
- printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_arch;
- }
+ par->Chipset = Chipset;
+ par->Architecture = Architecture;
sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
if (NVCommonSetup(info))
- goto err_out_arch;
+ goto err_out_free_base0;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
goto err_out_iounmap_fb;
}
-
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
info->fix.id,
@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
-err_out_arch:
- iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
pci_release_regions(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
framebuffer_release(info);
err_out:
+ iounmap(REGS);
return -ENODEV;
}