Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/prmt.c                                        |   6
-rw-r--r--  drivers/acpi/x86/s2idle.c                                  |   4
-rw-r--r--  drivers/base/power/domain.c                                |  10
-rw-r--r--  drivers/block/virtio_blk.c                                 |  39
-rw-r--r--  drivers/bus/ti-sysc.c                                      |   4
-rw-r--r--  drivers/clk/imx/clk-imx6q.c                                |   2
-rw-r--r--  drivers/clk/qcom/gdsc.c                                    |  54
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c                      |   6
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c                       |   2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c                             |   2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c                       |   8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c                   |   6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c         |   3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h                        |   1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c    |  96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c         |  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h         |   1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c          |   1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h          |   1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h            |   5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c      |  15
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c                                |   4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c                   |  24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c         |  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c      |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                            |  21
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c                  |   2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c                    |   2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c                |   2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c                    |  27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c                    |  13
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.h                    |   1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl0080.h              |   3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h               |   2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/client.h              |   1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/driver.h              |   2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/client.h         |   1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h          |   2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h          |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c                    |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c                     |  19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c                      |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c                      |  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c                     |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c                      |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c                     |  57
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c                      |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c                      |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ioctl.c                  |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c          |  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c          |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c              |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h              |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c            |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c             |  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild            |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h        |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c          |  94
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c         |  92
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c             |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c     |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c     |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c     |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c            |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c             |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h             |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c             |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c             |  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c              |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c         |  16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c                           |   2
-rw-r--r--  drivers/interconnect/qcom/icc-rpmh.c                       |  10
-rw-r--r--  drivers/iommu/dma-iommu.c                                  |   1
-rw-r--r--  drivers/iommu/intel/pasid.c                                |  10
-rw-r--r--  drivers/iommu/intel/pasid.h                                |   6
-rw-r--r--  drivers/iommu/intel/svm.c                                  |   3
-rw-r--r--  drivers/iommu/iommu.c                                      |   3
-rw-r--r--  drivers/ipack/carriers/tpci200.c                           |  60
-rw-r--r--  drivers/mmc/host/dw_mmc.c                                  |   6
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c                        |   7
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c                             |  21
-rw-r--r--  drivers/mmc/host/sdhci-msm.c                               |  18
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c                        |   2
-rw-r--r--  drivers/mtd/devices/mchp48l640.c                           |   5
-rw-r--r--  drivers/mtd/mtd_blkdevs.c                                  |  11
-rw-r--r--  drivers/mtd/mtdcore.c                                      |   4
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c                           |  10
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c                     |   6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c                  | 113
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h                  |   1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c        |  36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c                |   3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h                     |   1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c                |   1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c            |  47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c                   |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c               |   5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c                         |   1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c                  |  20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c                 |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c        |   4
-rw-r--r--  drivers/net/hamradio/6pack.c                               |   6
-rw-r--r--  drivers/net/mdio/mdio-mux.c                                |  37
-rw-r--r--  drivers/net/usb/asix_common.c                              |  70
-rw-r--r--  drivers/net/usb/r8152.c                                    |  23
-rw-r--r--  drivers/net/virtio_net.c                                   |  14
-rw-r--r--  drivers/net/vrf.c                                          |   4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c               |  25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c              |  70
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c            |   2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.h            |   3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c            |   2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.h            |   3
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c                  |   7
-rw-r--r--  drivers/opp/core.c                                         |  15
-rw-r--r--  drivers/opp/of.c                                           |   5
-rw-r--r--  drivers/pci/controller/pci-ixp4xx.c                        |   8
-rw-r--r--  drivers/pci/pci-sysfs.c                                    |   2
-rw-r--r--  drivers/pci/quirks.c                                       |   1
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c                         |  28
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c                        |   2
-rw-r--r--  drivers/ptp/Kconfig                                        |   3
-rw-r--r--  drivers/slimbus/messaging.c                                |   7
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c                            |  22
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c                                 |  84
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c                              |  13
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c                            |   4
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c                                |   9
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c                          |  14
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c                           |   4
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c                          |   4
-rw-r--r--  drivers/vhost/vdpa.c                                       |   3
-rw-r--r--  drivers/vhost/vhost.c                                      |  10
-rw-r--r--  drivers/vhost/vringh.c                                     |   2
-rw-r--r--  drivers/virtio/virtio.c                                    |   1
-rw-r--r--  drivers/virtio/virtio_pci_common.c                         |   7
-rw-r--r--  drivers/virtio/virtio_ring.c                               |  18
-rw-r--r--  drivers/virtio/virtio_vdpa.c                               |   3
138 files changed, 970 insertions(+), 815 deletions(-)
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 31cf9aee5edd..1f6007abcf18 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -292,6 +292,12 @@ void __init init_prmt(void)
int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
+ /*
+ * Return immediately if PRMT table is not present or no PRM module found.
+ */
+ if (mc <= 0)
+ return;
+
pr_info("PRM: found %u modules\n", mc);
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index fbdbef0ab552..3a308461246a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void)
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index adb41de68e0a..5db704f02e71 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -435,7 +435,7 @@ static void genpd_restore_performance_state(struct device *dev,
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
@@ -446,7 +446,13 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
return -EINVAL;
genpd_lock(genpd);
- ret = genpd_set_performance_state(dev, state);
+ if (pm_runtime_suspended(dev)) {
+ dev_gpd_data(dev)->rpm_pstate = state;
+ } else {
+ ret = genpd_set_performance_state(dev, state);
+ if (!ret)
+ dev_gpd_data(dev)->rpm_pstate = 0;
+ }
genpd_unlock(genpd);
return ret;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4b49df2dfd23..afb37aac09e8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
+static int virtblk_validate(struct virtio_device *vdev)
+{
+ u32 blk_size;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
+ return 0;
+
+ blk_size = virtio_cread32(vdev,
+ offsetof(struct virtio_blk_config, blk_size));
+
+ if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
+ __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
+
+ return 0;
+}
+
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
GFP_KERNEL);
if (err < 0)
@@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
+ if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+ dev_err(&vdev->dev,
+ "block size is changed unexpectedly, now is %u\n",
+ blk_size);
+ err = -EINVAL;
+ goto err_cleanup_disk;
+ }
+
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
@@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0;
+err_cleanup_disk:
+ blk_cleanup_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtblk_validate,
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 0ef98e3ba341..148a4dd8cb9a 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev)
return error;
error = sysc_check_active_timer(ddata);
- if (error == -EBUSY)
+ if (error == -ENXIO)
ddata->reserved = true;
+ else if (error)
+ return error;
error = sysc_get_clocks(ddata);
if (error)
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 496900de0b0b..de36f58d551c 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
}
- imx_register_uart_clocks(1);
+ imx_register_uart_clocks(2);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 51ed640e527b..4ece326ea233 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
if (on < 0)
return on;
- /*
- * Votable GDSCs can be ON due to Vote from other masters.
- * If a Votable GDSC is ON, make sure we have a Vote.
- */
- if ((sc->flags & VOTABLE) && on)
- gdsc_enable(&sc->pd);
+ if (on) {
+ /* The regulator must be on, sync the kernel state */
+ if (sc->rsupply) {
+ ret = regulator_enable(sc->rsupply);
+ if (ret < 0)
+ return ret;
+ }
- /*
- * Make sure the retain bit is set if the GDSC is already on, otherwise
- * we end up turning off the GDSC and destroying all the register
- * contents that we thought we were saving.
- */
- if ((sc->flags & RETAIN_FF_ENABLE) && on)
- gdsc_retain_ff_on(sc);
+ /*
+ * Votable GDSCs can be ON due to Vote from other masters.
+ * If a Votable GDSC is ON, make sure we have a Vote.
+ */
+ if (sc->flags & VOTABLE) {
+ ret = regmap_update_bits(sc->regmap, sc->gdscr,
+ SW_COLLAPSE_MASK, val);
+ if (ret)
+ return ret;
+ }
+
+ /* Turn on HW trigger mode if supported */
+ if (sc->flags & HW_CTRL) {
+ ret = gdsc_hwctrl(sc, true);
+ if (ret < 0)
+ return ret;
+ }
- /* If ALWAYS_ON GDSCs are not ON, turn them ON */
- if (sc->flags & ALWAYS_ON) {
- if (!on)
- gdsc_enable(&sc->pd);
+ /*
+ * Make sure the retain bit is set if the GDSC is already on,
+ * otherwise we end up turning off the GDSC and destroying all
+ * the register contents that we thought we were saving.
+ */
+ if (sc->flags & RETAIN_FF_ENABLE)
+ gdsc_retain_ff_on(sc);
+ } else if (sc->flags & ALWAYS_ON) {
+ /* If ALWAYS_ON GDSCs are not ON, turn them ON */
+ gdsc_enable(&sc->pd);
on = true;
- sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
else
gdsc_clear_mem_on(sc);
+ if (sc->flags & ALWAYS_ON)
+ sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
if (!sc->pd.power_off)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 3fc98a3ffd91..c10fc33b29b1 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ /*
+ * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+ * unstable because we do not know how to configure it properly.
+ */
+ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bef7528aecd3..231e585f6ba2 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
+ { .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm8150", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ec9a87ca2dbb..75f818d04b48 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
- ret = -ENOMEM;
+ return -ENOMEM;
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index c7b364e4a287..e883731c3f8f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
+ /* Flush pending deferred work to avoid racing with deferred actions from
+ * previous memory map changes (e.g. munmap). Concurrent memory map changes
+ * can still race with get_attr because we don't hold the mmap lock. But that
+ * would be a race condition in the application anyway, and undefined
+ * behaviour is acceptable in that case.
+ */
+ flush_work(&p->svms.deferred_list_work);
+
mmap_read_lock(mm);
if (!svm_range_is_valid(mm, start, size)) {
pr_debug("invalid range\n");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 605e297b7a59..a30283fa5173 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}
+
+void dc_z10_save_init(struct dc *dc)
+{
+ if (dc->hwss.z10_save_init)
+ dc->hwss.z10_save_init(dc);
+}
#endif
/*
* Applies given context to HW and copy it into current context.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index f2b39ec35c89..cde8ed2560b3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_z10_save_init(dc);
+#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index af7b60108e9d..21d78289b048 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
#endif
bool dc_enable_dmub_notifications(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 9776d1737818..912285fdce18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
-static void calculate_wm_set_for_vlevel(
- int vlevel,
- struct wm_range_table_entry *table_entry,
- struct dcn_watermarks *wm_set,
- struct display_mode_lib *dml,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt)
-{
- double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
- ASSERT(vlevel < dml->soc.num_states);
- /* only pipe 0 is read for voltage and dcf/soc clocks */
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
- dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
- dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
- dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
- wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
- wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
- dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel_req)
-{
- int i, pipe_idx;
- int vlevel, vlevel_max;
- struct wm_range_table_entry *table_entry;
- struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
- ASSERT(bw_params);
-
- vlevel_max = bw_params->clk_table.num_entries - 1;
-
- /* WM Set D */
- table_entry = &bw_params->wm_table.entries[WM_D];
- if (table_entry->wm_type == WM_TYPE_RETRAINING)
- vlevel = 0;
- else
- vlevel = vlevel_max;
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set C */
- table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set B */
- table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- /* WM Set A */
- table_entry = &bw_params->wm_table.entries[WM_A];
- vlevel = min(vlevel_req, vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 6ac6faf0c533..8a2119d8ca0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
&pipe_ctx->stream_res.encoder_info_frame);
}
}
+void dcn31_z10_save_init(struct dc *dc)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
void dcn31_z10_restore(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 40dfebe78fdd..140435e4f7ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index aaf2dbd095fe..b30d923471cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
.is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 5ab008e62b82..ad5f2adcc40d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
int width, int height, int offset);
void (*z10_restore)(struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7c4734f905d9..7fafb8d6c1da 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware restore.
*/
DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+ /**
+ * DCN hardware save.
+ */
+ DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
};
/**
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 25979106fd25..02e8c6e5448d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (adev->pdev->device == 0x6860);
+}
+
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
}
out:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ if (vega10_get_power_profile_mode_quirks(hwmgr))
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode,
+ NULL);
+ else
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);
+
hwmgr->power_profile_mode = power_profile_mode;
return 0;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d29907955ff7..5d82891c3222 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
- if (err)
- return err;
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
- return 0;
+ return err;
}
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index be716b56e8e0..00dade49665b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return BIT(PIPE_A) | BIT(PIPE_B);
+ else
+ return BIT(PIPE_A);
+}
+
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
if (!pipe_config->splitter.enable)
return;
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+ if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
return;
if (crtc_state->splitter.enable) {
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
- return;
-
dss1 |= SPLITTER_ENABLE;
dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /* Splitter enable for eDP MSO is limited to certain pipes. */
- if (dig_port->dp.mso_link_count) {
- encoder->pipe_mask = BIT(PIPE_A);
- if (IS_ALDERLAKE_P(dev_priv))
- encoder->pipe_mask |= BIT(PIPE_B);
- }
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 4298ae684d7d..86b7ac7b65ec 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
void intel_display_power_suspend(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 08bceae40aa8..053a3c2f7267 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
{
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c03943198089..c3816f5c6900 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- /*
- * Wa_14010685332:cnp/cmp,tgp,adp
- * TODO: Clarify which platforms this applies to
- * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
- * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
- */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
- (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
- SBCLK_RUN_REFCLK_DIS);
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
- }
-}
-
static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
- cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
-
- cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6f4c80bbc0eb..473f5bb5cbad 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index fa9d79963cd3..5326989d5206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 75bc00e17fc4..50d20562e612 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -34,6 +34,7 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_AAL_OUTPUT_SIZE 0x04d8
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
}
static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f949767698fc..bcb0310a41b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
+ /* Finish updating head(s)...
+ *
+ * NVD is rather picky about both where window assignments can change,
+ * *and* about certain core and window channel states matching.
+ *
+ * The EFI GOP driver on newer GPUs configures window channels with a
+ * different output format to what we do, and the core channel update
+ * in the assign_windows case above would result in a state mismatch.
+ *
+ * Delay some of the head update until after that point to workaround
+ * the issue. This only affects the initial modeset.
+ *
+ * TODO: handle this better when adding flexible window mapping
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set_wndw(head, asyh);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+ }
+
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index ec361d17e900..d66f97280282 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
}
void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- if (asyh->set.view ) head->func->view (head, asyh);
- if (asyh->set.mode ) head->func->mode (head, asyh);
- if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.load);
head->func->olut_set(head, asyh);
}
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ if (asyh->set.view ) head->func->view (head, asyh);
+ if (asyh->set.mode ) head->func->mode (head, asyh);
+ if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index dae841dc05fd..0bac6be9ba34 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head {
struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
void nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 0b86c44878e0..59759c4fb62e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -4,7 +4,8 @@
struct nv_device_v0 {
__u8 version;
- __u8 pad01[7];
+ __u8 priv;
+ __u8 pad02[6];
__u64 device; /* device identifier, ~0 for client default */
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index ba2c28ea43d2..c68cc957248e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -61,8 +61,6 @@
#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 347d2c020bd1..5d9395e651b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -9,7 +9,6 @@ struct nvif_client {
const struct nvif_driver *driver;
u64 version;
u8 route;
- bool super;
};
int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8e85b936eaa0..7a3af05f7f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -11,7 +11,7 @@ struct nvif_driver {
void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
- int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+ int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
bool keep;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 5d7017fe5039..2f86606e708c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -13,7 +13,6 @@ struct nvkm_client {
struct nvkm_client_notify *notify[32];
struct rb_root objroot;
- bool super;
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index 71ed147ad077..f52918a43246 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -4,5 +4,5 @@
#include <core/os.h>
struct nvkm_client;
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0911e73f7424..70e7887ef4b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -15,7 +15,6 @@ struct nvkm_vma {
u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
bool used:1; /* Region allocated. */
bool part:1; /* Region was split from an allocated region by map(). */
- bool user:1; /* Region user-allocated. */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b45ec3086285..4107b7006539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
}
client->route = NVDRM_OBJECT_ABI16;
- client->super = true;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->super = false;
client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 40362600eed2..80099ef75702 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
if (chan) {
struct nouveau_cli *cli = (void *)chan->user.client;
- bool super;
-
- if (cli) {
- super = cli->base.super;
- cli->base.super = true;
- }
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
-
- if (cli)
- cli->base.super = super;
}
*pchan = NULL;
}
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- bool super;
int ret;
/* hack until fencenv50 is fixed, and agp access relaxed */
- super = cli->base.super;
- cli->base.super = true;
-
ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
- goto done;
+ return ret;
}
}
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
- goto done;
+ return ret;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
if (ret)
nouveau_channel_del(pchan);
-done:
- cli->base.super = super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index a616cf4573b8..ba4cd5f83725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
+ .priv = true,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (ret)
goto done;
- cli->base.super = false;
-
fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 0de6549fb875..2ca3207c13fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
- bool super;
- int ret;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
return -ENOSYS;
}
- super = vmm->object.client->super;
- vmm->object.client->super = true;
- ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
- &mem->mem, 0);
- vmm->object.client->super = super;
- return ret;
+ return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
}
void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_mem_ram_v0 args = {};
- bool super = cli->base.super;
u8 type;
int ret;
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
reg->num_pages << PAGE_SHIFT,
&args, sizeof(args), &mem->mem);
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
return ret;
}
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
- bool super = cli->base.super;
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
int ret;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
reg->start = mem->mem.addr >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index b3f29b1ce9ea..52f5793b7274 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
}
static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
{
- return nvkm_ioctl(priv, super, data, size, hack);
+ return nvkm_ioctl(priv, data, size, hack);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 82b583f5fca8..b0c3422cb01f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -237,14 +237,11 @@ void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
if (limit > start) {
- bool super = svmm->vmm->vmm.object.client->super;
- svmm->vmm->vmm.object.client->super = true;
nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
&(struct nvif_vmm_pfnclr_v0) {
.addr = start,
.size = limit - start,
}, sizeof(struct nvif_vmm_pfnclr_v0));
- svmm->vmm->vmm.object.client->super = super;
}
}
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
NVIF_VMM_PFNMAP_V0_A |
NVIF_VMM_PFNMAP_V0_HOST;
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
nouveau_hmm_convert_pfn(drm, &range, args);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
mutex_lock(&svmm->mutex);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
npages * sizeof(args->p.phys[0]), NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 9dc10b17ad34..5da1f4d223d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -32,6 +32,9 @@
#include <nvif/event.h>
#include <nvif/ioctl.h>
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
struct usif_notify_p {
struct drm_pending_event base;
struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
}
static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
struct usif_object *object;
int ret = -ENOSYS;
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.oclass) {
+ case NV_DMA_FROM_MEMORY:
+ case NV_DMA_TO_MEMORY:
+ case NV_DMA_IN_MEMORY:
+ return -EINVAL;
+ case NV_DEVICE: {
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
+
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+ return ret;
+
+ args->v0.priv = false;
+ break;
+ }
+ default:
+ if (!parent_abi16)
+ return -EINVAL;
+ break;
+ }
+
if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
list_add(&object->head, &cli->objects);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- args->v0.token = object->token;
- args->v0.route = object->route;
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ args->v0.route = NVDRM_OBJECT_USIF;
+ args->v0.token = (unsigned long)(void *)object;
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret) {
+ usif_object_dtor(object);
+ return ret;
}
- if (ret)
- usif_object_dtor(object);
- return ret;
+ args->v0.token = object->token;
+ args->v0.route = object->route;
+ return 0;
}
int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
struct nvif_ioctl_v0 v0;
} *argv = data;
struct usif_object *object;
+ bool abi16 = false;
u8 owner;
int ret;
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
mutex_unlock(&cli->mutex);
goto done;
}
+
+ abi16 = true;
}
switch (argv->v0.type) {
case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc);
+ ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
case NVIF_IOCTL_V0_NTFY_NEW:
ret = usif_notify_new(filp, data, size, argv, argc);
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 12644f811b3e..a3264a0e933a 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -32,7 +32,7 @@
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
- return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+ return client->driver->ioctl(client->object.priv, data, size, NULL);
}
int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
- client->super = true;
client->driver = parent->driver;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 671a5c0199e0..dce1ecee2af5 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
} else
return -ENOSYS;
- return client->driver->ioctl(client->object.priv, client->super,
- data, size, hack);
+ return client->driver->ioctl(client->object.priv, data, size, hack);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index d777df5a64e6..735cb6816f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
}
int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
- void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
{
struct nvkm_object *object = &client->object;
union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
} *args = data;
int ret = -ENOSYS;
- client->super = supervisor;
nvif_ioctl(object, "size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b930f539feec..93ddf63d1114 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2624,6 +2624,26 @@ nv174_chipset = {
.dma = { 0x00000001, gv100_dma_new },
};
+static const struct nvkm_device_chip
+nv177_chipset = {
+ .name = "GA107",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index fea9d8f2b10c..f28894fdede9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
return ret;
/* give privileged clients register access */
- if (client->super)
+ if (args->v0.priv)
func = &nvkm_udevice_super;
else
func = &nvkm_udevice;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 55fbfe28c6dc..9669472a2749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}
-static void
+void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 428b3f488f03..e484d0c3b0d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dffcac249211..129982fef7ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
+#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
+
+ /* The EFI GOP driver on Ampere can leave unused DP links routed,
+ * which we don't expect. The DisableLT IED script *should* get
+ * us back to where we need to be.
+ */
+ if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+ nvkm_dp_disable(outp, ior);
+
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index d20cc0681a88..797131ed7d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -26,7 +26,6 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
-#include <subdev/instmem.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
union {
struct nv_dma_v0 v0;
} *args = *pdata;
- struct nvkm_device *device = dma->engine.subdev.device;
- struct nvkm_client *client = oclass->client;
struct nvkm_object *parent = oclass->parent;
- struct nvkm_instmem *instmem = device->imem;
- struct nvkm_fb *fb = device->fb;
void *data = *pdata;
u32 size = *psize;
int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_V0_TARGET_VRAM:
- if (!client->super) {
- if (dmaobj->limit >= fb->ram->size - instmem->reserved)
- return -EACCES;
- if (device->card_type >= NV_50)
- return -EACCES;
- }
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_V0_TARGET_PCI:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_V0_TARGET_PCI_US:
case NV_DMA_V0_TARGET_AGP:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 90e9a0972a44..3209eb7af65f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
nvkm-y += nvkm/engine/fifo/dmanv10.o
nvkm-y += nvkm/engine/fifo/dmanv17.o
nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
nvkm-y += nvkm/engine/fifo/gpfifonv50.o
nvkm-y += nvkm/engine/fifo/gpfifog84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index af8bdf275552..3a95730d7ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644
index fc34cddcd2f5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct g82_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
- nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
- .base.oclass = G82_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644
index 8043718ad150..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct nv50_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
- .base.oclass = NV50_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = nv50_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index c0a7d0f21dac..3885c3830b94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -119,7 +119,6 @@ g84_fifo = {
.uevent_init = g84_fifo_uevent_init,
.uevent_fini = g84_fifo_uevent_fini,
.chan = {
- &g84_fifo_dma_oclass,
&g84_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index b6900a52bcce..ae6c4d846eb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gk104_fifo_gpfifo_new_(fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index ee4967b706a7..743791c514fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
index abef7fb6e2d3..99aafa103a31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index be94156ea248..a08742cf425a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -136,7 +136,6 @@ nv50_fifo = {
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
- &nv50_fifo_dma_oclass,
&nv50_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
index fac2f9a45ea6..e530bb8b3b17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
object = nvkm_object_search(client, handle, &nvkm_umem);
if (IS_ERR(object)) {
- if (client->super && client != master) {
+ if (client != master) {
spin_lock(&master->lock);
list_for_each_entry(umem, &master->umem, head) {
if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
}
} else {
umem = nvkm_umem(object);
- if (!umem->priv || client->super)
- memory = nvkm_memory_ref(umem->memory);
+ memory = nvkm_memory_ref(umem->memory);
}
return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
umem->mmu = mmu;
umem->type = mmu->type[type].type;
- umem->priv = oclass->client->super;
INIT_LIST_HEAD(&umem->head);
*pobject = &umem->object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
index 85cf692d620a..d56a594016cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -8,7 +8,6 @@ struct nvkm_umem {
struct nvkm_object object;
struct nvkm_mmu *mmu;
u8 type:8;
- bool priv:1;
bool mappable:1;
bool io:1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 0e4b8941da37..6870fda4b188 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
{
struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
- if (mmu->func->mem.user.oclass && oclass->client->super) {
+ if (mmu->func->mem.user.oclass) {
if (index-- == 0) {
oclass->base = mmu->func->mem.user;
oclass->ctor = nvkm_umem_new;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index c43b8248c682..d6a1f8d04c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnclr_v0 v0;
} *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnmap_v0 v0;
} *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_unmap_v0 v0;
} *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto fail;
}
@@ -230,7 +219,6 @@ fail:
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_put_v0 v0;
} *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -268,7 +255,6 @@ done:
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_get_v0 v0;
} *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return ret;
args->v0.addr = vma->addr;
- vma->user = !client->super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 710f3f8dc7c9..8bf00b396ec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
new->refd = vma->refd;
new->used = vma->used;
new->part = vma->part;
- new->user = vma->user;
new->busy = vma->busy;
new->mapped = vma->mapped;
list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
static void
nvkm_vma_dump(struct nvkm_vma *vma)
{
- printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+ printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
vma->addr, (u64)vma->size,
vma->used ? '-' : 'F',
vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
vma->part ? 'P' : '-',
- vma->user ? 'U' : '-',
vma->busy ? 'B' : '-',
vma->mapped ? 'M' : '-',
vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
vma->mapref = true;
vma->sparse = false;
vma->used = true;
- vma->user = true;
nvkm_vmm_node_insert(vmm, vma);
list_add_tail(&vma->head, &vmm->list);
return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma->page = NVKM_VMA_PAGE_NONE;
vma->refd = NVKM_VMA_PAGE_NONE;
vma->used = false;
- vma->user = false;
nvkm_vmm_put_region(vmm, vma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index f02abd9cb4dd..b5e733783b5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -534,15 +534,13 @@ int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
- if (client->super) {
- switch (mthd) {
- case GP100_VMM_VN_FAULT_REPLAY:
- return gp100_vmm_fault_replay(vmm, argv, argc);
- case GP100_VMM_VN_FAULT_CANCEL:
- return gp100_vmm_fault_cancel(vmm, argv, argc);
- default:
- break;
- }
+ switch (mthd) {
+ case GP100_VMM_VN_FAULT_REPLAY:
+ return gp100_vmm_fault_replay(vmm, argv, argc);
+ case GP100_VMM_VN_FAULT_CANCEL:
+ return gp100_vmm_fault_cancel(vmm, argv, argc);
+ default:
+ break;
}
return -EINVAL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 74e3b460132b..2df59b3c2ea1 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -78,9 +78,7 @@ static int ttm_global_init(void)
ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
if (IS_ERR(ttm_debugfs_root)) {
- ret = PTR_ERR(ttm_debugfs_root);
ttm_debugfs_root = NULL;
- goto out;
}
/* Limit the number of pages in the pool to about 50% of the total
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 27cc5f03611c..f6fae64861ce 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -20,18 +20,13 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
-
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -49,8 +44,10 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -70,6 +67,9 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98ba927aee1a..6f0df629353f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
+ kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c6cf44a6c923..9ec374e17469 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
- u16 did;
+ u16 did, pgtt;
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
@@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
return;
did = pasid_get_domain_id(pte);
+ pgtt = pasid_pte_get_pgtt(pte);
+
intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+ if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+ else
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 5ff61c3d401f..c11bc8b833b8 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+ return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
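For orientation: the new pasid_pte_get_pgtt() helper above exposes the translation-type field (bits 8:6 of the entry's first qword), which is what lets the teardown path in pasid.c pick between a PASID-selective P-IOTLB flush and a domain-selective IOTLB flush. The sketch below is illustrative only (the function name is made up); the numeric cases follow the VT-d PGTT encoding assumed here, which the PASID_ENTRY_PGTT_* constants are understood to mirror.

/* Illustrative decode of a raw first qword, not part of the patch. */
static const char *example_pgtt_name(u64 val0)
{
	switch ((val0 >> 6) & 0x7) {	/* same extraction as pasid_pte_get_pgtt() */
	case 1: return "first-level only";
	case 2: return "second-level only";
	case 3: return "nested";
	case 4: return "pass-through";
	default: return "reserved";
	}
}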
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 9b0f22bc0514..4b9b3f35ba0e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- intel_svm_free_pasid(mm);
if (svm->notifier.ops) {
mmu_notifier_unregister(&svm->notifier, mm);
/* Clear mm's pasid. */
@@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
+ /* Drop a PASID reference and free it if no reference. */
+ intel_svm_free_pasid(mm);
}
out:
return ret;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5419c4b9f27a..63f0af10c403 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
+ if (!group)
+ return;
+
dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 3461b0a7dc62..cbfdadecb23b 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
free_irq(tpci200->info->pdev->irq, (void *) tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
- pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
- pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_disable_device(tpci200->info->pdev);
- pci_dev_put(tpci200->info->pdev);
}
static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_disable_pci;
+ goto err_disable_device;
}
/* Request IO ID INT space (Bar 3) */
@@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ip_space;
+ goto err_ip_interface_bar;
}
/* Request MEM8 space (Bar 5) */
@@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ioid_int_space;
+ goto err_io_id_int_spaces_bar;
}
/* Request MEM16 space (Bar 4) */
@@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_mem8_space;
+ goto err_mem8_space_bar;
}
/* Map internal tpci200 driver user space */
@@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
res = -ENOMEM;
- goto out_release_mem8_space;
+ goto err_mem16_space_bar;
}
/* Initialize lock that protects interface_regs */
@@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) unable to register IRQ !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ioid_int_space;
+ goto err_interface_regs;
}
return 0;
-out_release_mem8_space:
+err_interface_regs:
+ pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+ pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
pci_disable_device(tpci200->info->pdev);
return res;
}
@@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
if (!tpci200->info) {
ret = -ENOMEM;
- goto out_err_info;
+ goto err_tpci200;
}
pci_dev_get(pdev);
@@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
ret = -EBUSY;
- goto out_err_pci_request;
+ goto err_tpci200_info;
}
tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (!tpci200->info->cfg_regs) {
dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
ret = -EFAULT;
- goto out_err_ioremap;
+ goto err_request_region;
}
/* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "error during tpci200 install\n");
ret = -ENODEV;
- goto out_err_install;
+ goto err_cfg_regs;
}
/* Register the carrier in the industry pack bus driver */
@@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"error registering the carrier on ipack driver\n");
ret = -EFAULT;
- goto out_err_bus_register;
+ goto err_tpci200_install;
}
/* save the bus number given by ipack to logging purpose */
@@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200_create_device(tpci200, i);
return 0;
-out_err_bus_register:
+err_tpci200_install:
tpci200_uninstall(tpci200);
- /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
- tpci200->info->cfg_regs = NULL;
-out_err_install:
- if (tpci200->info->cfg_regs)
- iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
- pci_dev_put(pdev);
+err_tpci200_info:
kfree(tpci200->info);
-out_err_info:
+ pci_dev_put(pdev);
+err_tpci200:
kfree(tpci200);
return ret;
}
@@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
ipack_bus_unregister(tpci200->info->ipack_bus);
tpci200_uninstall(tpci200);
+ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+ pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+ pci_dev_put(tpci200->info->pdev);
+
kfree(tpci200->info);
kfree(tpci200);
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d333130d1531..c3229d8c7041 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
continue;
}
- dw_mci_stop_dma(host);
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 51db30acf4dc..fdaa11f92fe6 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
u32 status;
int ret = 0;
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+ host->pwr_reg & MCI_STM32_VSWITCHEN) {
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
spin_unlock_irqrestore(&host->lock, flags);
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
host->base + MMCICLEAR);
+ spin_lock_irqsave(&host->lock, flags);
mmci_write_pwrreg(host, host->pwr_reg &
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
}
+ spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index cce390fe9cf3..e7565c671998 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically this can be reproduced under the following
+ * conditions:
+ *
+ * - No SD card plugged in, polling thread is running, probing cards at
+ * 100 kHz.
+ * - BCM2711's core clock configured at 500MHz or more
+ *
+ * So we set 200kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+ return 200000;
+}
+
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
+ .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
- .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &sdhci_iproc_bcm2711_ops,
};
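For context on the new .get_min_clock hook (a paraphrased sketch, not part of the patch, and the programmable-clock path is omitted): when the hook is present, sdhci_setup_host() reports its return value as the host's minimum frequency instead of deriving one from the base clock and the maximum divider, roughly:

	/* paraphrased from drivers/mmc/host/sdhci.c; exact branches may differ */
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300)
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

With the hook returning 200000, the MMC core should never request a bus clock below 200 kHz on BCM2711, which avoids the large core-clock/bus-clock ratio described in the comment above.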
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e44b7a66b73c..290a14cdc1cf 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
sdhci_cqe_disable(mmc, recovery);
}
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u32 count, start = 15;
+
+ __sdhci_set_timeout(host, cmd);
+ count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+ /*
+ * Update software timeout value if its value is less than hardware data
+ * timeout value. Qcom SoC hardware data timeout value was calculated
+ * using 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
+ */
+ if (cmd && cmd->data && host->clock > 400000 &&
+ host->clock <= 50000000 &&
+ ((1 << (count + start)) > (10 * host->clock)))
+ host->data_timeout = 22LL * NSEC_PER_SEC;
+}
+
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
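To make the comparison in sdhci_msm_set_timeout() above concrete (an illustrative sketch, not part of the patch): with start = 15, the check (1 << (count + 15)) > 10 * host->clock is equivalent to asking whether the hardware data timeout of 4 * 2^(count + 13) / host->clock seconds exceeds ten seconds. For the maximum TIMEOUT_CONTROL count of 14 at a 50 MHz bus clock that is 4 * 2^27 / 50e6 ~= 10.7 s, which is why the software timeout is bumped to 22 s in that range. The helper name below is hypothetical:

static u64 example_msm_hw_timeout_ns(u8 count, unsigned int clock_hz)
{
	/* 4 * 2^(count + 13) clock cycles, i.e. 2^(count + 15), in nanoseconds */
	return div64_u64((1ULL << (count + 15)) * NSEC_PER_SEC, clock_hz);
}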
@@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.irq = sdhci_msm_cqe_irq,
.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
.set_power = sdhci_set_power_noreg,
+ .set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3097e93787f7..a761134fd3be 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
- return extp->MinorVersion >= '5' &&
+ return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index efc2003bd13a..99400d0fb8c1 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
woff += ws;
}
- return ret;
+ return 0;
}
static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
@@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
if (!ret)
*retlen += len;
+ kfree(cmd);
return ret;
fail:
@@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
woff += ws;
}
- return ret;
+ return 0;
};
static const struct mchp48_caps mchp48l640_caps = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 6ce4bc57f919..44bea3f65060 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+ new->rq->limits.discard_granularity = tr->blksize;
}
gd->queue = new->rq;
@@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
-
- mutex_lock(&mtd_table_mutex);
-
ret = register_blkdev(tr->major, tr->name);
if (ret < 0) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkshift = ffs(tr->blksize) - 1;
INIT_LIST_HEAD(&tr->devs);
- list_add(&tr->list, &blktrans_majors);
+ mutex_lock(&mtd_table_mutex);
+ list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
if (mtd->type != MTD_ABSENT)
tr->add_mtd(tr, mtd);
-
mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- unregister_blkdev(tr->major, tr->name);
mutex_unlock(&mtd_table_mutex);
+ unregister_blkdev(tr->major, tr->name);
BUG_ON(!list_empty(&tr->devs));
return 0;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b5ccd3037788..c8fd7f758938 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
err:
kfree(info);
- return ret;
+
+ /* ENODATA means there is no OTP region. */
+ return ret == -ENODATA ? 0 : ret;
}
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 57a583149cc0..3d6c6e880520 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
+ struct property *prop;
int nr_elem, i, j;
- nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
- if (!nr_elem)
+ /* Only proceed if the "secure-regions" property is present in DT */
+ prop = of_find_property(dn, "secure-regions", NULL);
+ if (!prop)
return 0;
+ nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+ if (nr_elem <= 0)
+ return nr_elem;
+
chip->nr_secure_regions = nr_elem / 2;
chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
GFP_KERNEL);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 19aea8fb76f6..705d3900e43a 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
struct mii_bus *bus;
int rc = 0;
- np = of_find_compatible_node(mdio_node, NULL,
- "nxp,sja1110-base-tx-mdio");
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
if (!np)
return 0;
@@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
struct mii_bus *bus;
int rc = 0;
- np = of_find_compatible_node(mdio_node, NULL,
- "nxp,sja1110-base-t1-mdio");
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
if (!np)
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2fe743503949..8a97640cdfe7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -72,7 +72,8 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
+ NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 prod)
+{
+ bnxt_db_write(bp, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ struct netdev_queue *txq)
+{
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+
+ return true;
+}
+
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
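The barrier comment in bnxt_txr_netif_try_stop_queue() above is the producer half of the usual stop/wake handshake; the consumer half lives in bnxt_tx_int(), reworked further below. A distilled illustration (not from the patch; avail() is shorthand for bnxt_tx_avail() and thresh for bp->tx_wake_thresh):

/*
 * producer side (bnxt_start_xmit, via the helper above):
 *	netif_tx_stop_queue(txq);
 *	smp_mb();			// order the stop against the re-check
 *	if (avail() > thresh)
 *		netif_tx_wake_queue(txq);	// consumer already freed slots
 *
 * consumer side (bnxt_tx_int):
 *	txr->tx_cons = cons;		// publishes freed descriptors
 *	smp_mb();			// order the cons update against the stopped check
 *	if (netif_tx_queue_stopped(txq) && avail() > thresh)
 *		netif_tx_wake_queue(txq);
 *
 * The paired barriers guarantee at least one side observes the other's
 * update, so the queue cannot stay stopped while descriptors are free.
 */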
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
- netif_tx_stop_queue(txq);
- return NETDEV_TX_BUSY;
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bp, tx_err, dev,
+ "bnxt: ring busy w/ flush pending!\n");
+ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ return NETDEV_TX_BUSY;
}
length = skb->len;
@@ -517,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
- if (skb_pad(skb, pad)) {
+ if (skb_pad(skb, pad))
/* SKB already freed. */
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -618,24 +646,17 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
- bnxt_db_write(bp, &txr->tx_db, prod);
+ bnxt_txr_db_kick(bp, txr, prod);
+ else
+ txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
- bnxt_db_write(bp, &txr->tx_db, prod);
-
- netif_tx_stop_queue(txq);
+ bnxt_txr_db_kick(bp, txr, prod);
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
- netif_tx_wake_queue(txq);
+ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
@@ -648,7 +669,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
@@ -662,7 +682,13 @@ tx_dma_error:
PCI_DMA_TODEVICE);
}
+tx_free:
dev_kfree_skb_any(skb);
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -732,14 +758,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
- txr->dev_state != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
- __netif_tx_unlock(txq);
- }
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1767,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1989,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2454,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
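The three dma_rmb() additions above follow the same pattern: the completion entry is DMA-written by the NIC and its valid bit lands last, so the CPU must order the validity check before any other loads from that entry. A generic sketch (hypothetical struct and field names, not taken from the driver):

struct example_cmpl {
	__le32 flags;		/* bit 0: valid, written last by hardware */
	__le32 len;
	__le64 addr;
};

static bool example_poll_one(struct example_cmpl *e, u32 *len)
{
	if (!(le32_to_cpu(READ_ONCE(e->flags)) & 0x1))
		return false;	/* hardware has not finished this entry yet */
	dma_rmb();		/* order the valid check before the payload reads */
	*len = le32_to_cpu(e->len);
	return true;
}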
@@ -9128,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
-
- napi_disable(&bp->bnapi[i]->napi);
}
}
@@ -9165,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@@ -9181,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = 0;
+ WRITE_ONCE(txr->dev_state, 0);
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
@@ -10768,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
+ /* 212 firmware is broken for aRFS */
+ if (BNXT_FW_MAJ(bp) == 212)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bcf8d00b8c80..ba4e0fc38520 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
+ u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 68b78642c045..98cc0133c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ destroy_workqueue(ethsw->workqueue);
+
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
- dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- dpaa2_switch_free_dpio(ethsw);
- dpaa2_switch_destroy_rings(ethsw);
- dpaa2_switch_drain_bp(ethsw);
- dpaa2_switch_free_dpbp(ethsw);
-}
-
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev);
- dpaa2_switch_ctrl_if_teardown(ethsw);
-
dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw->acls);
kfree(ethsw->ports);
- dpaa2_switch_takedown(sw_dev);
-
- destroy_workqueue(ethsw->workqueue);
+ dpaa2_switch_teardown(sw_dev);
fsl_mc_portal_free(ethsw->mc_io);
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
GFP_KERNEL);
if (!(ethsw->ports)) {
err = -ENOMEM;
- goto err_takedown;
+ goto err_teardown;
}
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
err_free_ports:
kfree(ethsw->ports);
-err_takedown:
- dpaa2_switch_takedown(sw_dev);
+err_teardown:
+ dpaa2_switch_teardown(sw_dev);
err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3f25bd8c4924..10a83e5385c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3663,8 +3663,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
/* is DCB enabled at all? */
if (vsi->tc_config.numtc == 1)
- return i40e_swdcb_skb_tx_hash(netdev, skb,
- netdev->real_num_tx_queues);
+ return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
hw = &vsi->back->hw;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e8bd04100ecd..90793b36126e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -136,6 +136,7 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
+ bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 244ec74ceca7..606a01ce4073 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 0eab3c43bdc5..3c735968e1b8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -541,6 +541,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ f->is_new_mac = false;
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+ f->remove = false;
+
+ if (f->is_new_mac) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
+ iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
- case VIRTCHNL_OP_ADD_ETH_ADDR: {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ if (!v_retval)
+ iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- }
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
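
Note: the two helpers added above give the VF an accept/rollback protocol for MAC filters. Each filter is flagged is_new_mac when queued; iavf_mac_add_ok() clears the flag once the PF acknowledges the add, and iavf_mac_add_reject() drops every still-flagged entry when the PF refuses it. A minimal userspace sketch of the same bookkeeping, assuming a plain singly linked list (the list type and names here are illustrative, not the driver's; the driver does this under mac_vlan_list_lock and also special-cases netdev->dev_addr, both omitted):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct filter {
        struct filter *next;
        unsigned char mac[6];
        bool is_new;                /* queued, waiting for the PF verdict */
    };

    /* PF accepted the batch: keep everything, clear the "new" flag. */
    static void filters_add_ok(struct filter *head)
    {
        for (struct filter *f = head; f; f = f->next)
            f->is_new = false;
    }

    /* PF rejected the batch: free every entry still marked as new. */
    static void filters_add_reject(struct filter **head)
    {
        struct filter **pp = head;

        while (*pp) {
            struct filter *f = *pp;

            if (f->is_new) {
                *pp = f->next;      /* unlink before freeing */
                free(f);
            } else {
                pp = &f->next;
            }
        }
    }

    static struct filter *filter_add(struct filter **head, const unsigned char *mac)
    {
        struct filter *f = calloc(1, sizeof(*f));

        if (!f)
            return NULL;
        for (int i = 0; i < 6; i++)
            f->mac[i] = mac[i];
        f->is_new = true;           /* mirrors f->is_new_mac = true in the patch */
        f->next = *head;
        *head = f;
        return f;
    }

    int main(void)
    {
        struct filter *list = NULL;
        const unsigned char a[6] = { 0x02, 0, 0, 0, 0, 1 };
        const unsigned char b[6] = { 0x02, 0, 0, 0, 0, 2 };

        filter_add(&list, a);
        filters_add_ok(list);       /* first add acknowledged: a is kept */
        filter_add(&list, b);
        filters_add_reject(&list);  /* second add NAK'd: only b is dropped */

        for (struct filter *f = list; f; f = f->next)
            printf("kept %02x:...:%02x\n", f->mac[0], f->mac[5]);
        return 0;
    }
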
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 5d5207b56ca9..9e3ddb9b8b51 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+ start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
start_time -= E810_OUT_PROP_DELAY_NS;
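
Note: the one-constant fix above makes the driver round a stale start time up to the next full second boundary (plus phase) instead of using NSEC_PER_MSEC in the ceiling term, which only rounded up when the current time was within a millisecond of the next second and could therefore program a start time in the past. The rounding idiom, checked in isolation with 64-bit nanosecond timestamps (names here are illustrative):

    #include <inttypes.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Round t up to the next multiple of NSEC_PER_SEC, then add phase. */
    static uint64_t next_second_boundary(uint64_t t, uint64_t phase)
    {
        return (t + NSEC_PER_SEC - 1) / NSEC_PER_SEC * NSEC_PER_SEC + phase;
    }

    int main(void)
    {
        uint64_t now = 12ULL * NSEC_PER_SEC + 345;      /* 12.000000345 s */

        /* 12.000000345 s rounds up to 13 s; an exact boundary stays put. */
        printf("%" PRIu64 "\n", next_second_boundary(now, 0));
        printf("%" PRIu64 "\n", next_second_boundary(12ULL * NSEC_PER_SEC, 0));
        return 0;
    }
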
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 96dd1a4f956a..b1d22e4d5ec9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
- if (err)
+ if (err) {
+ clear_bit(qid, adapter->af_xdp_zc_qps);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index adfb9781799e..2948d731a1c1 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a4610d9330..c46a7f756ed5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags;
int rc = -EINVAL;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0;
int rc = 0;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_rx->lock, flags);
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return 0;
+ }
+
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn)
return -EINVAL;
p_rx = &p_ll2_conn->rx_queue;
+ if (!p_rx->set_prod_addr)
+ return -EIO;
spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index da864d12916b..4f4b79250a2b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d8882d0b6b49..d51bac7ba5af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
- if (err == -EIO)
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
return err;
+ }
word = ret;
*(u32 *)p_data = word;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index fcf3af76b6d7..8fe8887d506a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return;
}
+ if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+ pr_err("6pack: cooked buffer overrun, data loss\n");
+ sp->rx_count = 0;
+ return;
+ }
+
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
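
Note: the new check above stops decode_data() from decoding another 6PACK triplet once the cooked buffer cannot hold the up to three bytes it would produce. A hedged sketch of the same guard against a fixed-size destination buffer (the size and names are illustrative, not the driver's):

    #include <stdio.h>

    #define COOKED_SIZE 400         /* stands in for sizeof(sp->cooked_buf) */

    struct decoder {
        unsigned char cooked[COOKED_SIZE];
        int count;
    };

    /*
     * Append up to three decoded bytes; refuse instead of writing past
     * the end of the buffer.
     */
    static int push_triplet(struct decoder *d, const unsigned char *src)
    {
        if (d->count + 2 >= (int)sizeof(d->cooked)) {
            fprintf(stderr, "cooked buffer overrun, dropping frame\n");
            return -1;              /* the driver also resets its raw byte counter */
        }
        d->cooked[d->count++] = src[0];
        d->cooked[d->count++] = src[1];
        d->cooked[d->count++] = src[2];
        return 0;
    }

    int main(void)
    {
        struct decoder d = { .count = 0 };
        unsigned char in[3] = { 1, 2, 3 };

        while (push_triplet(&d, in) == 0)
            ;                       /* stops cleanly at the buffer limit */
        printf("decoded %d bytes before the guard tripped\n", d.count);
        return 0;
    }
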
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 110e4ee85785..ebd001f0eece 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -82,6 +82,17 @@ out:
static int parent_count;
+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+ struct mdio_mux_child_bus *cb = pb->children;
+
+ while (cb) {
+ mdiobus_unregister(cb->mii_bus);
+ mdiobus_free(cb->mii_bus);
+ cb = cb->next;
+ }
+}
+
int mdio_mux_init(struct device *dev,
struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
ret_val = -ENOMEM;
- continue;
+ goto err_loop;
}
cb->bus_number = v;
cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
- devm_kfree(dev, cb);
- continue;
+ goto err_loop;
}
cb->mii_bus->priv = cb;
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
+ mdiobus_free(cb->mii_bus);
+ if (r == -EPROBE_DEFER) {
+ ret_val = r;
+ goto err_loop;
+ }
+ devm_kfree(dev, cb);
dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node);
- mdiobus_free(cb->mii_bus);
- devm_kfree(dev, cb);
} else {
cb->next = pb->children;
pb->children = cb;
@@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
}
dev_err(dev, "Error: No acceptable child buses found\n");
- devm_kfree(dev, pb);
+
+err_loop:
+ mdio_mux_uninit_children(pb);
+ of_node_put(child_bus_node);
err_pb_kz:
put_device(&parent_bus->dev);
err_parent_bus:
@@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle)
{
struct mdio_mux_parent_bus *pb = mux_handle;
- struct mdio_mux_child_bus *cb = pb->children;
-
- while (cb) {
- mdiobus_unregister(cb->mii_bus);
- mdiobus_free(cb->mii_bus);
- cb = cb->next;
- }
+ mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);
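
Note: the reworked init path above stops silently skipping hard per-child failures. Allocation failures and -EPROBE_DEFER now jump to err_loop, which unwinds every child bus already registered via the new mdio_mux_uninit_children() helper, and the same helper is reused by mdio_mux_uninit(). The control flow, sketched outside the kernel with malloc/free and stub register/unregister calls (all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct child {
        struct child *next;
        int id;
    };

    static struct child *children;          /* stands in for pb->children */

    static int register_child(struct child *c)
    {
        return c->id == 2 ? -1 : 0;         /* pretend child 2 refuses to register */
    }

    static void unregister_child(struct child *c)
    {
        printf("unregistering child %d\n", c->id);
    }

    /* Shared by the error path and the normal uninit, as in the patch. */
    static void uninit_children(void)
    {
        struct child *c = children;

        while (c) {
            struct child *next = c->next;

            unregister_child(c);
            free(c);
            c = next;
        }
        children = NULL;
    }

    static int mux_init(int nchildren)
    {
        int ret, found = 0;

        for (int i = 0; i < nchildren; i++) {
            struct child *c = calloc(1, sizeof(*c));

            if (!c) {
                ret = -1;                   /* -ENOMEM in the driver */
                goto err_loop;              /* unwind what was built so far */
            }
            c->id = i;
            if (register_child(c)) {
                /* soft failure: drop this child, keep probing the rest */
                free(c);
                continue;
            }
            c->next = children;
            children = c;                   /* only registered children are linked */
            found++;
        }
        if (found)
            return 0;

        ret = -1;                           /* no usable child bus at all */
    err_loop:
        uninit_children();
        return ret;
    }

    int main(void)
    {
        if (mux_init(5))
            return 1;
        uninit_children();                  /* normal teardown reuses the helper */
        return 0;
    }
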
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index ac92bc52a85e..38cda590895c 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static int asix_check_host_enable(struct usbnet *dev, int in_pm)
+{
+ int i, ret;
+ u8 smsr;
+
+ for (i = 0; i < 30; ++i) {
+ ret = asix_set_sw_mii(dev, in_pm);
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, in_pm);
+ if (ret == -ENODEV)
+ break;
+ else if (ret < 0)
+ continue;
+ else if (smsr & AX_HOST_EN)
+ break;
+ }
+
+ return ret;
+}
+
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
{
/* Reset the variables that have a lifetime outside of
@@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
- u8 smsr;
- int i = 0;
int ret;
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV || ret == -ETIMEDOUT)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 0);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
@@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- u8 smsr;
- int i = 0;
int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 0);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV)
goto out;
@@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
- u8 smsr;
- int i = 0;
int ret;
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV || ret == -ETIMEDOUT)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 1);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
@@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- u8 smsr;
- int i = 0;
int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 1);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV) {
mutex_unlock(&dev->phy_mutex);
return;
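
Note: the four open-coded retry loops above collapse into one asix_check_host_enable() helper: switch the PHY to software MII mode, poll the status register until the host-enable bit appears, and stop early on fatal errors while retrying transient ones. A generic poll-until-bit-set sketch of that shape (the accessors are stubs, not the usbnet API):

    #include <stdio.h>

    #define HOST_EN   0x01
    #define MAX_TRIES 30

    /* Stubs standing in for asix_set_sw_mii() and asix_read_cmd(). */
    static int set_sw_mii(void)
    {
        return 0;
    }

    static int read_status(unsigned char *status)
    {
        static int calls;

        *status = ++calls >= 3 ? HOST_EN : 0;   /* bit comes up on the third poll */
        return 0;
    }

    /*
     * Poll until the host-enable bit is set.  Fatal errors stop the loop
     * immediately, transient read errors just retry; the last status code
     * is returned, as in the driver helper.
     */
    static int check_host_enable(void)
    {
        unsigned char status = 0;
        int i, ret = 0;

        for (i = 0; i < MAX_TRIES; i++) {
            ret = set_sw_mii();
            if (ret < 0)
                break;              /* device gone: give up */
            /* the driver sleeps about 1 ms here (usleep_range) */
            ret = read_status(&status);
            if (ret < 0)
                continue;           /* transient error: try again */
            if (status & HOST_EN)
                break;              /* PHY handed over to the host */
        }
        return ret;
    }

    int main(void)
    {
        printf("check_host_enable() = %d\n", check_host_enable());
        return 0;
    }
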
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e09b107b5c99..79832374f78d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3955,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
case RTL_VER_06:
ocp_write_byte(tp, type, PLA_BP_EN, 0);
break;
+ case RTL_VER_14:
+ ocp_write_word(tp, type, USB_BP2_EN, 0);
+
+ ocp_write_word(tp, type, USB_BP_8, 0);
+ ocp_write_word(tp, type, USB_BP_9, 0);
+ ocp_write_word(tp, type, USB_BP_10, 0);
+ ocp_write_word(tp, type, USB_BP_11, 0);
+ ocp_write_word(tp, type, USB_BP_12, 0);
+ ocp_write_word(tp, type, USB_BP_13, 0);
+ ocp_write_word(tp, type, USB_BP_14, 0);
+ ocp_write_word(tp, type, USB_BP_15, 0);
+ break;
case RTL_VER_08:
case RTL_VER_09:
case RTL_VER_10:
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
- case RTL_VER_14:
case RTL_VER_15:
default:
if (type == MCU_TYPE_USB) {
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@@ -4331,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
- case RTL_VER_14:
case RTL_VER_15:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
@@ -4339,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
bp_start = PLA_BP_0;
max_bp = 8;
break;
+ case RTL_VER_14:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = USB_BP2_EN;
+ bp_start = PLA_BP_0;
+ max_bp = 16;
+ break;
default:
goto out;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 56c3f8519093..eee493685aad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM
};
-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -2515,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
- NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+ NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
return -EOPNOTSUPP;
}
@@ -2646,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev,
u64 offloads;
int err;
- if ((dev->features ^ features) & NETIF_F_LRO) {
+ if ((dev->features ^ features) & NETIF_F_GRO_HW) {
if (vi->xdp_enabled)
return -EBUSY;
- if (features & NETIF_F_LRO)
+ if (features & NETIF_F_GRO_HW)
offloads = vi->guest_offloads_capable;
else
offloads = vi->guest_offloads_capable &
- ~GUEST_OFFLOAD_LRO_MASK;
+ ~GUEST_OFFLOAD_GRO_HW_MASK;
err = virtnet_set_guest_offloads(vi, offloads);
if (err)
@@ -3134,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_RXCSUM;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_GRO_HW;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
- dev->hw_features |= NETIF_F_LRO;
+ dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 2b1b944d4b28..8bbe2a7bb141 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
+ nf_reset_ct(skb);
+
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
+ nf_reset_ct(skb);
+
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 2403490cbc26..b4b1f75b9c2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
+ bool hw_match = false;
u32 size = 0;
int ret;
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
+ if (hw_match)
+ break;
+
mac_type = le16_to_cpup((__le16 *)data);
rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id);
- if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
- rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
- IWL_DEBUG_FW(trans,
- "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
- CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
- ret = -ENOENT;
- goto out;
- }
-
+ if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+ rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+ hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
}
done:
+ if (!hw_match) {
+ IWL_DEBUG_FW(trans,
+ "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+ CSR_HW_REV_TYPE(trans->hw_rev),
+ CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ ret = -ENOENT;
+ goto out;
+ }
+
if (!size) {
IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
ret = -ENOENT;
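
Note: the PNVM parser above now records whether any IWL_UCODE_TLV_HW_TYPE entry matched the hardware and only rejects the image after the whole section has been walked, instead of bailing out at the first non-matching entry. A compact sketch of that scan-then-decide pattern over a toy TLV stream (the types and layout are made up for illustration and do not follow the firmware format):

    #include <stdbool.h>
    #include <stdio.h>

    enum tlv_type { TLV_HW_TYPE, TLV_DATA };

    struct tlv {
        enum tlv_type type;
        unsigned int value;         /* hw id for HW_TYPE, byte count for DATA */
    };

    /* Walk every TLV first; decide acceptance only at the end. */
    static int parse_image(const struct tlv *tlvs, int n, unsigned int our_hw)
    {
        bool hw_match = false;
        unsigned int size = 0;

        for (int i = 0; i < n; i++) {
            switch (tlvs[i].type) {
            case TLV_HW_TYPE:
                if (hw_match)
                    break;          /* already matched, skip further checks */
                if (tlvs[i].value == our_hw)
                    hw_match = true;
                break;
            case TLV_DATA:
                size += tlvs[i].value;
                break;
            }
        }

        if (!hw_match || !size)
            return -1;              /* -ENOENT in the driver */
        printf("accepted %u bytes\n", size);
        return 0;
    }

    int main(void)
    {
        const struct tlv image[] = {
            { TLV_HW_TYPE, 0x35 },  /* some other platform */
            { TLV_DATA, 512 },
            { TLV_HW_TYPE, 0x42 },  /* ours */
            { TLV_DATA, 256 },
        };

        /* The early-return version would have failed on the first entry. */
        return parse_image(image, 4, 0x42) ? 1 : 0;
    }
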
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 16baee3d52ae..0b8a0cd3b652 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
+/* SoF with JF2 */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+ iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+/* So with JF2 */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* So with JF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 863aa18b3024..43960770a9af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
- return MT_CIPHER_NONE;
+ return MCU_CIPHER_NONE;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index edd3ba3a0c2d..e68a562cc5b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -1073,7 +1073,8 @@ enum {
};
enum mcu_cipher_type {
- MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_NONE = 0,
+ MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index cd690c64f65b..9fbaacc67cfa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
- return MT_CIPHER_NONE;
+ return MCU_CIPHER_NONE;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index d76cf8f8dfdf..de3c091f6736 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -199,7 +199,8 @@ struct sta_rec_sec {
} __packed;
enum mcu_cipher_type {
- MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_NONE = 0,
+ MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
index 804e6c4f2c78..519361ec40df 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
{
- int array_size = ARRAY_SIZE(modem_cfg);
-
- if (index >= array_size) {
- pr_err("index: %d and array_size %d", index, array_size);
+ if (index >= ARRAY_SIZE(modem_cfg)) {
+ pr_err("index: %d and array size %zu", index,
+ ARRAY_SIZE(modem_cfg));
return -ECHRNG;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index b335c077f215..5543c54dacc5 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
kfree(opp_table->supported_hw);
opp_table->supported_hw = NULL;
opp_table->supported_hw_count = 0;
@@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
kfree(opp_table->prop_name);
opp_table->prop_name = NULL;
@@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
if (!opp_table->regulators)
goto put_opp_table;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_disable(opp_table->regulators[i]);
@@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
clk_put(opp_table->clk);
opp_table->clk = ERR_PTR(-EINVAL);
@@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
opp_table->set_opp = NULL;
mutex_lock(&opp_table->lock);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9bdabad11ca5..2a97c6535c4c 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -956,8 +956,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
}
}
- /* There should be one of more OPP defined */
- if (WARN_ON(!count)) {
+ /* There should be one or more OPPs defined */
+ if (!count) {
+ dev_err(dev, "%s: no supported OPPs", __func__);
ret = -ENOENT;
goto remove_static_opp;
}
diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c
index 896a45b24236..654ac4a82beb 100644
--- a/drivers/pci/controller/pci-ixp4xx.c
+++ b/drivers/pci/controller/pci-ixp4xx.c
@@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
return 0;
}
-static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
{
ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
@@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
return ixp4xx_pci_check_master_abort(p);
}
-static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
{
ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
@@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
- ret = ixp4xx_pci_read(p, addr, cmd, &val);
+ ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
if (ret)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
- ret = ixp4xx_pci_write(p, addr, cmd, val);
+ ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
if (ret)
return PCIBIOS_DEVICE_NOT_FOUND;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5d63df7c1820..7bbf2673c7f2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
- b->legacy_io->mapping = iomem_get_mapping();
+ b->legacy_mem->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6d74386eadc2..ab3de1551b50 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0cb927f0f301..a81dc4b191b7 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -41,6 +41,10 @@ static int wapf = -1;
module_param(wapf, uint, 0444);
MODULE_PARM_DESC(wapf, "WAPF value");
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
static struct quirk_entry *quirks;
static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_use_lid_flip_devid,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS TP200s / E205SA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+ },
+ .driver_data = &quirk_asus_use_lid_flip_devid,
+ },
{},
};
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
else
wapf = quirks->wapf;
+ switch (tablet_mode_sw) {
+ case 0:
+ quirks->use_kbd_dock_devid = false;
+ quirks->use_lid_flip_devid = false;
+ break;
+ case 1:
+ quirks->use_kbd_dock_devid = true;
+ quirks->use_lid_flip_devid = false;
+ break;
+ case 2:
+ quirks->use_kbd_dock_devid = false;
+ quirks->use_lid_flip_devid = true;
+ break;
+ }
+
if (quirks->i8042_filter) {
ret = i8042_install_filter(quirks->i8042_filter);
if (ret) {
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index fbb224a82e34..7f3a03f937f6 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
}}
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
@@ -147,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
{ }
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 8c20e524e9ad..e085c255da0c 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOMEM && NET
+ depends on HAS_IOMEM && PCI
+ depends on NET
imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index f2b5d347d227..e5ae26227bdb 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
int ret = 0;
spin_lock_irqsave(&ctrl->txn_lock, flags);
- ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+ ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
SLIM_MAX_TIDS, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
goto slim_xfer_err;
}
}
-
+ /* Initialize tid to invalid value */
+ txn->tid = 0;
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
txn->mt, txn->mc, txn->la, ret);
slim_xfer_err:
- if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
+ if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
/*
* remove runtime-pm vote if this was TX only, or
* if there was error during this transaction
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index c054e83ab636..7040293c2ee8 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
(mc == SLIM_USR_MC_GENERIC_ACK &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
- pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_mark_last_busy(ctrl->ctrl.dev);
}
}
@@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
u32 cfg = readl_relaxed(ctrl->ngd->base);
- if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+ ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
qcom_slim_ngd_init_dma(ctrl);
/* By default enable message queues */
@@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
return 0;
}
+ qcom_slim_ngd_setup(ctrl);
return 0;
}
@@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
}
/* controller state should be in sync with framework state */
complete(&ctrl->qmi.qmi_comp);
- if (!pm_runtime_enabled(ctrl->dev) ||
- !pm_runtime_suspended(ctrl->dev))
- qcom_slim_ngd_runtime_resume(ctrl->dev);
+ if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
+ !pm_runtime_suspended(ctrl->ctrl.dev))
+ qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
else
- pm_runtime_resume(ctrl->dev);
- pm_runtime_mark_last_busy(ctrl->dev);
- pm_runtime_put(ctrl->dev);
+ pm_runtime_resume(ctrl->ctrl.dev);
+
+ pm_runtime_mark_last_busy(ctrl->ctrl.dev);
+ pm_runtime_put(ctrl->ctrl.dev);
ret = slim_register_controller(&ctrl->ctrl);
if (ret) {
@@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
/* Make sure the last dma xfer is finished */
mutex_lock(&ctrl->tx_lock);
if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
- pm_runtime_get_noresume(ctrl->dev);
+ pm_runtime_get_noresume(ctrl->ctrl.dev);
ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
qcom_slim_ngd_down(ctrl);
qcom_slim_ngd_exit_dma(ctrl);
@@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
+ qcom_slim_ngd_exit_dma(ctrl);
if (!ctrl->qmi.handle)
return 0;
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 3f711c1a0996..bbae3d39c7be 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
@@ -53,8 +54,8 @@ struct qe_ic {
struct irq_chip hc_irq;
/* VIRQ numbers of QE high/low irqs */
- unsigned int virq_high;
- unsigned int virq_low;
+ int virq_high;
+ int virq_low;
};
/*
@@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-static void __init qe_ic_init(struct device_node *node)
+static int qe_ic_init(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
void (*low_handler)(struct irq_desc *desc);
void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
- struct resource res;
- u32 ret;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
- ret = of_address_to_resource(node, 0, &res);
- if (ret)
- return;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
- qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+ qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
if (qe_ic == NULL)
- return;
+ return -ENOMEM;
- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
- &qe_ic_host_ops, qe_ic);
- if (qe_ic->irqhost == NULL) {
- kfree(qe_ic);
- return;
+ qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (qe_ic->regs == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
}
- qe_ic->regs = ioremap(res.start, resource_size(&res));
-
qe_ic->hc_irq = qe_ic_irq_chip;
- qe_ic->virq_high = irq_of_parse_and_map(node, 0);
- qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+ qe_ic->virq_high = platform_get_irq(pdev, 0);
+ qe_ic->virq_low = platform_get_irq(pdev, 1);
- if (!qe_ic->virq_low) {
- printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
- kfree(qe_ic);
- return;
- }
- if (qe_ic->virq_high != qe_ic->virq_low) {
+ if (qe_ic->virq_low <= 0)
+ return -ENODEV;
+
+ if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
low_handler = qe_ic_cascade_low;
high_handler = qe_ic_cascade_high;
} else {
@@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
high_handler = NULL;
}
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ dev_err(dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
- if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
+ if (high_handler) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
+ return 0;
}
+static const struct of_device_id qe_ic_ids[] = {
+ { .compatible = "fsl,qe-ic"},
+ { .type = "qeic"},
+ {},
+};
-static int __init qe_ic_of_init(void)
+static struct platform_driver qe_ic_driver =
{
- struct device_node *np;
+ .driver = {
+ .name = "qe-ic",
+ .of_match_table = qe_ic_ids,
+ },
+ .probe = qe_ic_init,
+};
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return -ENODEV;
- }
- qe_ic_init(np);
- of_node_put(np);
+static int __init qe_ic_of_init(void)
+{
+ platform_driver_register(&qe_ic_driver);
return 0;
}
subsys_initcall(qe_ic_of_init);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index b9bb63d749ec..f4079b5cb26d 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
return rlen;
}
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+ enum pd_msg_request message,
+ enum tcpm_ams ams);
+
static void tcpm_handle_vdm_request(struct tcpm_port *port,
const __le32 *payload, int cnt)
{
@@ -1764,11 +1768,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_DONE;
}
- if (PD_VDO_SVDM(p[0])) {
+ if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
} else {
if (port->negotiated_rev >= PD_REV30)
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
}
/*
@@ -2471,10 +2475,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
- tcpm_handle_vdm_request(port, msg->payload, cnt);
- else if (port->negotiated_rev > PD_REV20)
- tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+ tcpm_handle_vdm_request(port, msg->payload, cnt);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 21b78f1cd521..351c6cfb24c3 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops, NULL);
- if (adapter == NULL) {
+ if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return -ENOMEM;
+ return PTR_ERR(adapter);
}
pci_set_master(pdev);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index dcee6039e966..e59135fa867e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -512,11 +512,6 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
-static bool map_empty(struct vhost_iotlb *iotlb)
-{
- return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
-}
-
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int err = 0;
*change_map = false;
- if (map_empty(iotlb)) {
- mlx5_vdpa_destroy_mr(mvdev);
- return 0;
- }
mutex_lock(&mr->mkey_mtx);
if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n");
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 379a19144a25..3cc12fcab08d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -752,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
/* prefer split queue */
- if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
- return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+ if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
- WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+ WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
- return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
static bool vq_is_tx(u16 idx)
@@ -2029,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return -ENOSPC;
mdev = mgtdev->madev->mdev;
+ if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
+ dev_warn(mdev->device, "missing support for split virtqueues\n");
+ return -EOPNOTSUPP;
+ }
+
/* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 14e024de5cbf..c621cf7feec0 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
dev_attr->name);
- if (!vdpasim)
+ if (IS_ERR(vdpasim)) {
+ ret = PTR_ERR(vdpasim);
goto err_alloc;
+ }
vdpasim->dev_attr = *dev_attr;
INIT_WORK(&vdpasim->work, dev_attr->work_fn);
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 7b4a6396c553..fe0527329857 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, NULL);
- if (vp_vdpa == NULL) {
+ if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
- return -ENOMEM;
+ return PTR_ERR(vp_vdpa);
}
mdev = &vp_vdpa->mdev;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 210ab35a7ebf..9479f7f79217 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
long pinned;
int ret = 0;
- if (msg->iova < v->range.first ||
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last)
return -EINVAL;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b9e853e6094d..59edb5a1ffe2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
- /* Make sure 64 bit math will not overflow. */
- return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+ if (uaddr > ULONG_MAX || size > ULONG_MAX)
+ return true;
+
+ if (!size)
+ return false;
+
+ return uaddr > ULONG_MAX - size + 1;
}
/* Caller should have vq mutex and device mutex. */
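
Note: vhost_overflow() now treats a zero-length range as non-overflowing and compares against ULONG_MAX - size + 1, so a mapping that ends exactly at the top of the address space is still accepted. The arithmetic, checked in isolation with 64-bit values (the kernel check also caps uaddr and size at ULONG_MAX for 32-bit builds, which this sketch leaves out):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ULONG_MAX_64 UINT64_MAX     /* assume a 64-bit unsigned long here */

    /* True if [uaddr, uaddr + size) wraps past the end of the address space. */
    static bool range_overflows(uint64_t uaddr, uint64_t size)
    {
        if (size == 0)
            return false;               /* empty range cannot overflow */

        return uaddr > ULONG_MAX_64 - size + 1;
    }

    int main(void)
    {
        /* A range ending exactly at the top of memory is fine... */
        printf("%d\n", range_overflows(UINT64_MAX - 4095, 4096));   /* 0 */
        /* ...one byte further wraps around. */
        printf("%d\n", range_overflows(UINT64_MAX - 4094, 4096));   /* 1 */
        printf("%d\n", range_overflows(123, 0));                    /* 0 */
        return 0;
    }
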
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 4af8fa259d65..14e2043d7685 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov;
else {
iov = riov;
- if (unlikely(wiov && wiov->i)) {
+ if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 4b15c00c0a0a..49984d2cba24 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
+ spin_lock_init(&dev->vqs_list_lock);
/*
* device_add() causes the bus infrastructure to look for a matching
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 222d630c41fc..b35bb2d57f62 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev);
+ /*
+ * Device is marked broken on surprise removal so that virtio upper
+ * layers can abort any ongoing operation.
+ */
+ if (!pci_device_is_present(pci_dev))
+ virtio_break_device(&vp_dev->vdev);
+
pci_disable_sriov(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 89bfe46a8a7f..dd95dfd85e98 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
#include <xen/xen.h>
#ifdef DEBUG
@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow);
}
+ spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_desc_extra:
@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
+ spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_extra:
@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
- list_del(&_vq->list);
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->broken;
+ return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
{
struct virtqueue *_vq;
+ spin_lock(&dev->vqs_list_lock);
list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
- vq->broken = true;
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
}
+ spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
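
Note: the hunks above serialize additions to and removals from vdev->vqs with the new vqs_list_lock, and turn vq->broken into a WRITE_ONCE()/READ_ONCE() pair so virtio_break_device() can run safely against concurrent lockless readers. A reduced sketch of the same shape using a pthread mutex and C11 atomics in place of the kernel primitives (illustrative only):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vq {
        struct vq *next;
        atomic_bool broken;             /* stands in for WRITE_ONCE/READ_ONCE on vq->broken */
    };

    struct dev {
        pthread_mutex_t vqs_lock;       /* stands in for vdev->vqs_list_lock */
        struct vq *vqs;
    };

    static void add_vq(struct dev *d, struct vq *q)
    {
        pthread_mutex_lock(&d->vqs_lock);
        q->next = d->vqs;
        d->vqs = q;
        pthread_mutex_unlock(&d->vqs_lock);
    }

    /* Mark every queue broken; safe against concurrent add_vq()/vq_is_broken(). */
    static void break_device(struct dev *d)
    {
        pthread_mutex_lock(&d->vqs_lock);
        for (struct vq *q = d->vqs; q; q = q->next)
            atomic_store(&q->broken, true);
        pthread_mutex_unlock(&d->vqs_lock);
    }

    static bool vq_is_broken(struct vq *q)
    {
        return atomic_load(&q->broken); /* lockless read, like the fast path */
    }

    int main(void)
    {
        struct dev d = { .vqs_lock = PTHREAD_MUTEX_INITIALIZER, .vqs = NULL };
        struct vq a = { 0 }, b = { 0 };

        add_vq(&d, &a);
        add_vq(&d, &b);
        break_device(&d);
        printf("a broken=%d b broken=%d\n", vq_is_broken(&a), vq_is_broken(&b));
        return 0;
    }
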
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index e1a141135992..72eaef2caeb1 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!name)
return NULL;
+ if (index >= vdpa->nvqs)
+ return ERR_PTR(-ENOENT);
+
/* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);