author    Jakub Kicinski <kuba@kernel.org>  2023-08-18 12:44:22 -0700
committer Jakub Kicinski <kuba@kernel.org>  2023-08-18 12:44:56 -0700
commit    7ff57803d24e0cb326251489d70f60462e3e6c04 (patch)
tree      4ff813109291e5d2c7745619d490af3cce481674 /drivers
parent    c2e5f4fd1148727801a63d938cec210f16b48864 (diff)
parent    0e8860d2125f51ba9bca67a520d826cb8f66cf42 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/sfc/tc.c
  fa165e194997 ("sfc: don't unregister flow_indr if it was never registered")
  3bf969e88ada ("sfc: add MAE table machinery for conntrack table")
https://lore.kernel.org/all/20230818112159.7430e9b4@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 8
-rw-r--r-- drivers/accel/qaic/qaic_control.c | 26
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 1
-rw-r--r-- drivers/acpi/resource.c | 64
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/android/binder.c | 1
-rw-r--r-- drivers/android/binder_alloc.c | 6
-rw-r--r-- drivers/android/binder_alloc.h | 1
-rw-r--r-- drivers/base/cpu.c | 4
-rw-r--r-- drivers/block/zram/zram_drv.c | 32
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 4
-rw-r--r-- drivers/counter/Kconfig | 14
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 10
-rw-r--r-- drivers/cpuidle/cpuidle-psci-domain.c | 39
-rw-r--r-- drivers/cpuidle/dt_idle_genpd.c | 24
-rw-r--r-- drivers/cpuidle/dt_idle_genpd.h | 7
-rw-r--r-- drivers/gpio/gpio-sim.c | 1
-rw-r--r-- drivers/gpio/gpio-ws16c48.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 7
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 14
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 24
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 24
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6505.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 4
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 29
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 48
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 36
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 24
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dumb.c | 5
-rw-r--r-- drivers/gpu/drm/qxl/qxl_gem.c | 25
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ioctl.c | 6
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 17
-rw-r--r-- drivers/hwmon/aquacomputer_d5next.c | 37
-rw-r--r-- drivers/hwmon/pmbus/bel-pfe.c | 16
-rw-r--r-- drivers/iio/adc/ad7192.c | 16
-rw-r--r-- drivers/iio/adc/ina2xx-adc.c | 9
-rw-r--r-- drivers/iio/adc/meson_saradc.c | 23
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 2
-rw-r--r-- drivers/iio/frequency/admv1013.c | 5
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 2
-rw-r--r-- drivers/iio/industrialio-core.c | 5
-rw-r--r-- drivers/iio/light/rohm-bu27008.c | 22
-rw-r--r-- drivers/iio/light/rohm-bu27034.c | 22
-rw-r--r-- drivers/infiniband/core/umem.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 1
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 1
-rw-r--r-- drivers/interconnect/qcom/bcm-voter.c | 5
-rw-r--r-- drivers/interconnect/qcom/icc-rpmh.h | 2
-rw-r--r-- drivers/interconnect/qcom/sa8775p.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8450.c | 9
-rw-r--r-- drivers/interconnect/qcom/sm8550.c | 17
-rw-r--r-- drivers/misc/cardreader/rts5227.c | 2
-rw-r--r-- drivers/misc/cardreader/rts5228.c | 18
-rw-r--r-- drivers/misc/cardreader/rts5249.c | 3
-rw-r--r-- drivers/misc/cardreader/rts5260.c | 18
-rw-r--r-- drivers/misc/cardreader/rts5261.c | 18
-rw-r--r-- drivers/misc/cardreader/rtsx_pcr.c | 5
-rw-r--r-- drivers/misc/tps6594-esm.c | 19
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 8
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 16
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_fdir.c | 77
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_fdir.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/ef100_nic.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 2
-rw-r--r-- drivers/net/pcs/pcs-rzn1-miic.c | 10
-rw-r--r-- drivers/net/phy/broadcom.c | 13
-rw-r--r-- drivers/net/phy/phy_device.c | 13
-rw-r--r-- drivers/net/team/team.c | 4
-rw-r--r-- drivers/net/veth.c | 3
-rw-r--r-- drivers/net/virtio_net.c | 6
-rw-r--r-- drivers/nvme/host/core.c | 10
-rw-r--r-- drivers/nvme/host/ioctl.c | 2
-rw-r--r-- drivers/nvme/host/pci.c | 3
-rw-r--r-- drivers/nvme/host/rdma.c | 3
-rw-r--r-- drivers/nvme/host/tcp.c | 3
-rw-r--r-- drivers/parisc/sba_iommu.c | 6
-rw-r--r-- drivers/pci/controller/Kconfig | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 13
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 20
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 1
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 8
-rw-r--r-- drivers/platform/x86/amd/pmf/sps.c | 3
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 4
-rw-r--r-- drivers/platform/x86/lenovo-ymc.c | 25
-rw-r--r-- drivers/platform/x86/mlx-platform.c | 23
-rw-r--r-- drivers/platform/x86/msi-ec.c | 18
-rw-r--r-- drivers/platform/x86/serial-multi-instantiate.c | 14
-rw-r--r-- drivers/regulator/da9063-regulator.c | 12
-rw-r--r-- drivers/regulator/qcom-rpmh-regulator.c | 2
-rw-r--r-- drivers/scsi/53c700.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 20
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 18
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 23
-rw-r--r-- drivers/scsi/raid_class.c | 1
-rw-r--r-- drivers/scsi/scsi_proc.c | 30
-rw-r--r-- drivers/scsi/snic/snic_disc.c | 1
-rw-r--r-- drivers/scsi/storvsc_drv.c | 4
-rw-r--r-- drivers/thunderbolt/tb.c | 2
-rw-r--r-- drivers/thunderbolt/tmu.c | 4
-rw-r--r-- drivers/ufs/host/ufs-renesas.c | 2
-rw-r--r-- drivers/usb/common/usb-conn-gpio.c | 6
-rw-r--r-- drivers/usb/dwc3/gadget.c | 9
-rw-r--r-- drivers/usb/gadget/udc/core.c | 9
-rw-r--r-- drivers/usb/storage/alauda.c | 12
-rw-r--r-- drivers/usb/typec/altmodes/displayport.c | 18
-rw-r--r-- drivers/usb/typec/mux/Kconfig | 1
-rw-r--r-- drivers/usb/typec/mux/nb7vpq904m.c | 25
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 7
-rw-r--r-- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2
-rw-r--r-- drivers/vdpa/mlx5/core/mr.c | 97
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 26
-rw-r--r-- drivers/vdpa/pds/Makefile | 3
-rw-r--r-- drivers/vdpa/pds/debugfs.c | 15
-rw-r--r-- drivers/vdpa/pds/vdpa_dev.c | 176
-rw-r--r-- drivers/vdpa/pds/vdpa_dev.h | 5
-rw-r--r-- drivers/vdpa/vdpa.c | 9
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 8
-rw-r--r-- drivers/vhost/scsi.c | 187
-rw-r--r-- drivers/virtio/virtio_mem.c | 168
-rw-r--r-- drivers/virtio/virtio_mmio.c | 5
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 2
-rw-r--r-- drivers/virtio/virtio_pci_legacy.c | 1
-rw-r--r-- drivers/virtio/virtio_vdpa.c | 2
169 files changed, 1667 insertions, 733 deletions
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 52b339aefadc..9967fcfa27ec 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -173,6 +173,9 @@ static void internal_free_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
+ if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+ set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
+
for (i = 0; i < npages; i++)
put_page(bo->pages[i]);
@@ -587,6 +590,11 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
+ if (bo->flags & DRM_IVPU_BO_WC)
+ set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
+ else if (bo->flags & DRM_IVPU_BO_UNCACHED)
+ set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
+
prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
if (!bo->kvaddr) {
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index cfbc92da426f..388abd40024b 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
struct qaic_manage_trans_dma_xfer *in_trans,
struct ioctl_resources *resources, struct dma_xfer *xfer)
{
+ u64 xfer_start_addr, remaining, end, total;
unsigned long need_pages;
struct page **page_list;
unsigned long nr_pages;
struct sg_table *sgt;
- u64 xfer_start_addr;
int ret;
int i;
- xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
+ if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
+ return -EINVAL;
- need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
- resources->xferred_dma_size, PAGE_SIZE);
+ if (in_trans->size < resources->xferred_dma_size)
+ return -EINVAL;
+ remaining = in_trans->size - resources->xferred_dma_size;
+ if (remaining == 0)
+ return 0;
+
+ if (check_add_overflow(xfer_start_addr, remaining, &end))
+ return -EINVAL;
+
+ total = remaining + offset_in_page(xfer_start_addr);
+ if (total >= SIZE_MAX)
+ return -EINVAL;
+
+ need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
nr_pages = need_pages;
@@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
offset_in_page(xfer_start_addr),
- in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
+ remaining, GFP_KERNEL);
if (ret) {
ret = -ENOMEM;
goto free_sgt;
@@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
QAIC_MANAGE_EXT_MSG_LENGTH)
return -ENOMEM;
- if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
- return -EINVAL;
-
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
if (!xfer)
return -ENOMEM;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index e9a1cb779b30..6b6d981a71be 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
bo->dbc = dbc;
srcu_read_unlock(&dbc->ch_lock, rcu_id);
drm_gem_object_put(obj);
+ kfree(slice_ent);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 1dd8d5aebf67..a4d9f149b48d 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -470,6 +470,45 @@ static const struct dmi_system_id asus_laptop[] = {
{ }
};
+static const struct dmi_system_id tongfang_gm_rg[] = {
+ {
+ .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ { }
+};
+
+static const struct dmi_system_id maingear_laptop[] = {
+ {
+ .ident = "MAINGEAR Vector Pro 2 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
+ },
+ },
+ { }
+};
+
+static const struct dmi_system_id pcspecialist_laptop[] = {
+ {
+ .ident = "PCSpecialist Elimina Pro 16 M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Elimina Pro 16 M"),
+ },
+ },
+ { }
+};
+
static const struct dmi_system_id lg_laptop[] = {
{
.ident = "LG Electronics 17U70P",
@@ -493,6 +532,9 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { pcspecialist_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
};
@@ -512,6 +554,28 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
return entry->override;
}
+#ifdef CONFIG_X86
+ /*
+ * Always use the MADT override info, except for the i8042 PS/2 ctrl
+ * IRQs (1 and 12). For these the DSDT IRQ settings should sometimes
+ * be used otherwise PS/2 keyboards / mice will not work.
+ */
+ if (gsi != 1 && gsi != 12)
+ return true;
+
+ /* If the override comes from an INT_SRC_OVR MADT entry, honor it. */
+ if (acpi_int_src_ovr[gsi])
+ return true;
+
+ /*
+ * IRQ override isn't needed on modern AMD Zen systems and
+ * this override breaks active low IRQs on AMD Ryzen 6000 and
+ * newer systems. Skip it.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN))
+ return false;
+#endif
+
return true;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5b145f1aaa1b..87e385542576 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1714,6 +1714,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
+ {"CSC3556", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 486c8271cab7..d720f93d8b19 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6617,6 +6617,7 @@ err_init_binder_device_failed:
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+ binder_alloc_shrinker_exit();
return ret;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 662a2a2e2e84..e3db8297095a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void)
return ret;
}
+void binder_alloc_shrinker_exit(void)
+{
+ unregister_shrinker(&binder_shrinker);
+ list_lru_destroy(&binder_alloc_lru);
+}
+
/**
* check_buffer() - verify that buffer/offset is safe to access
* @alloc: binder_alloc for this proc
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 138d1d5af9ce..dc1e2b01dd64 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -129,6 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
+extern void binder_alloc_shrinker_exit(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index d7300d885822..fe6690ecf563 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -532,7 +532,7 @@ CPU_SHOW_VULN_FALLBACK(srbds);
CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
-CPU_SHOW_VULN_FALLBACK(gather_data_sampling);
+CPU_SHOW_VULN_FALLBACK(gds);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -546,7 +546,7 @@ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
-static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gather_data_sampling, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5676e6dd5b16..06673c6ca255 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
- struct bvec_iter iter;
- struct bio_vec bv;
- unsigned long start_time;
+ unsigned long start_time = bio_start_io_acct(bio);
+ struct bvec_iter iter = bio->bi_iter;
- start_time = bio_start_io_acct(bio);
- bio_for_each_segment(bv, bio, iter) {
+ do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
+ struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
- }
+
+ bio_advance_iter_single(bio, &iter, bv.bv_len);
+ } while (iter.bi_size);
+
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
static void zram_bio_write(struct zram *zram, struct bio *bio)
{
- struct bvec_iter iter;
- struct bio_vec bv;
- unsigned long start_time;
+ unsigned long start_time = bio_start_io_acct(bio);
+ struct bvec_iter iter = bio->bi_iter;
- start_time = bio_start_io_acct(bio);
- bio_for_each_segment(bv, bio, iter) {
+ do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
+ struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
- }
+
+ bio_advance_iter_single(bio, &iter, bv.bv_len);
+ } while (iter.bi_size);
+
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index ac4daaf294a3..7fa3d91042b2 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -89,7 +89,7 @@ static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
tpm_tis_flush(iobase);
}
-static int interrupts = -1;
+static int interrupts;
module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -183,7 +183,7 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
.ident = "UPX-TGL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "UPX-TGL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UPX-TGL01"),
},
},
{}
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index bca21df51168..62962ae84b77 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -3,13 +3,6 @@
# Counter devices
#
-menuconfig COUNTER
- tristate "Counter support"
- help
- This enables counter device support through the Generic Counter
- interface. You only need to enable this, if you also want to enable
- one or more of the counter device drivers below.
-
config I8254
tristate
select COUNTER
@@ -25,6 +18,13 @@ config I8254
If built as a module its name will be i8254.
+menuconfig COUNTER
+ tristate "Counter support"
+ help
+ This enables counter device support through the Generic Counter
+ interface. You only need to enable this, if you also want to enable
+ one or more of the counter device drivers below.
+
if COUNTER
config 104_QUAD_8
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 81fba0dcbee9..9a1e194d5cf8 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1012,8 +1012,8 @@ static int amd_pstate_update_status(const char *buf, size_t size)
return 0;
}
-static ssize_t show_status(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
ssize_t ret;
@@ -1024,7 +1024,7 @@ static ssize_t show_status(struct kobject *kobj,
return ret;
}
-static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+static ssize_t status_store(struct device *a, struct device_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@@ -1043,7 +1043,7 @@ cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
-define_one_global_rw(status);
+static DEVICE_ATTR_RW(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -1062,7 +1062,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
};
static struct attribute *pstate_global_attributes[] = {
- &status.attr,
+ &dev_attr_status.attr,
NULL
};
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index c2d6d9c3c930..b88af1262f1a 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -120,20 +120,6 @@ static void psci_pd_remove(void)
}
}
-static bool psci_pd_try_set_osi_mode(void)
-{
- int ret;
-
- if (!psci_has_osi_support())
- return false;
-
- ret = psci_set_osi_mode(true);
- if (ret)
- return false;
-
- return true;
-}
-
static void psci_cpuidle_domain_sync_state(struct device *dev)
{
/*
@@ -152,15 +138,12 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *node;
- bool use_osi;
+ bool use_osi = psci_has_osi_support();
int ret = 0, pd_count = 0;
if (!np)
return -ENODEV;
- /* If OSI mode is supported, let's try to enable it. */
- use_osi = psci_pd_try_set_osi_mode();
-
/*
* Parse child nodes for the "#power-domain-cells" property and
* initialize a genpd/genpd-of-provider pair when it's found.
@@ -170,33 +153,37 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
continue;
ret = psci_pd_init(node, use_osi);
- if (ret)
- goto put_node;
+ if (ret) {
+ of_node_put(node);
+ goto exit;
+ }
pd_count++;
}
/* Bail out if not using the hierarchical CPU topology. */
if (!pd_count)
- goto no_pd;
+ return 0;
/* Link genpd masters/subdomains to model the CPU topology. */
ret = dt_idle_pd_init_topology(np);
if (ret)
goto remove_pd;
+ /* let's try to enable OSI. */
+ ret = psci_set_osi_mode(use_osi);
+ if (ret)
+ goto remove_pd;
+
pr_info("Initialized CPU PM domain topology using %s mode\n",
use_osi ? "OSI" : "PC");
return 0;
-put_node:
- of_node_put(node);
remove_pd:
+ dt_idle_pd_remove_topology(np);
psci_pd_remove();
+exit:
pr_err("failed to create CPU PM domains ret=%d\n", ret);
-no_pd:
- if (use_osi)
- psci_set_osi_mode(false);
return ret;
}
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
index b37165514d4e..1af63c189039 100644
--- a/drivers/cpuidle/dt_idle_genpd.c
+++ b/drivers/cpuidle/dt_idle_genpd.c
@@ -152,6 +152,30 @@ int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}
+int dt_idle_pd_remove_topology(struct device_node *np)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_remove_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
struct device *dev;
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
index a95483d08a02..3be1f70f55b5 100644
--- a/drivers/cpuidle/dt_idle_genpd.h
+++ b/drivers/cpuidle/dt_idle_genpd.h
@@ -14,6 +14,8 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
int dt_idle_pd_init_topology(struct device_node *np);
+int dt_idle_pd_remove_topology(struct device_node *np);
+
struct device *dt_idle_attach_cpu(int cpu, const char *name);
void dt_idle_detach_cpu(struct device *dev);
@@ -36,6 +38,11 @@ static inline int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}
+static inline int dt_idle_pd_remove_topology(struct device_node *np)
+{
+ return 0;
+}
+
static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
return NULL;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 8b49b0abacd5..f1f6f1c32987 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -429,6 +429,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->set_config = gpio_sim_set_config;
gc->to_irq = gpio_sim_to_irq;
gc->free = gpio_sim_free;
+ gc->can_sleep = true;
ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index e73885a4dc32..afb42a8e916f 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#define WS16C48_EXTENT 10
+#define WS16C48_EXTENT 11
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
static unsigned int base[MAX_NUM_WS16C48];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a3b86b86dc47..6dc950c1b689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 040f4cb6ab2d..fb78a8f47587 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -295,7 +295,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (!p->gang_size) {
ret = -EINVAL;
- goto free_partial_kdata;
+ goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a2cdde0ca0a7..6238701cde23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1459,6 +1459,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
/*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+ switch (amdgpu_sg_display) {
+ case -1:
+ break;
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+ if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+ (adev->gmc.real_vram_size / 1024) >= 64000000) {
+ DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+ return false;
+ }
+ return true;
+}
+
+/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
@@ -3696,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
adev->gfx.mcbp = true;
-
- if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
- adev->gfx.num_gfx_rings)
+ else if (amdgpu_mcbp == 0)
+ adev->gfx.mcbp = false;
+ else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+ adev->gfx.num_gfx_rings)
adev->gfx.mcbp = true;
if (amdgpu_sriov_vf(adev))
@@ -4367,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c694b41f6461..7537f5aa76f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
}
/**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring that to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool is_gfx_power_domain = false;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_SDMA:
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+ is_gfx_power_domain = true;
+ break;
+ case AMDGPU_RING_TYPE_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ case AMDGPU_RING_TYPE_MES:
+ is_gfx_power_domain = true;
+ break;
+ default:
+ break;
+ }
+
+ return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
+/**
* amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
*
@@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_force_completion(ring);
if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
- ring->fence_drv.irq_src)
+ ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
@@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
continue;
/* enable the interrupt */
- if (ring->fence_drv.irq_src)
+ if (ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a33d4bc34cee..fd81b04559d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- /* If going to s2idle, no need to wait */
- if (adev->in_s0ix) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
- adev->gfx.gfx_off_state = true;
- } else {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
- }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index b779ee4bbaa7..e1ee1c7117fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
- if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
if (amdgpu_mcbp_scan(mux) > 0)
amdgpu_mcbp_trigger_preempt(mux);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 9c9cca129498..565a1fa436d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
for (i = 1; i < MAX_XCP; i++) {
ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
- if (ret)
+ if (ret == -ENOSPC) {
+ dev_warn(adev->dev,
+ "Skip xcp node #%d when out of drm node resource.", i);
+ return 0;
+ } else if (ret) {
return ret;
+ }
/* Redirect all IOCTLs to the primary device */
adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
@@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
return 0;
for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
if (ret)
return ret;
@@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
return;
for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
p_ddev = adev->xcp_mgr->xcp[i].ddev;
drm_dev_unplug(p_ddev);
p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 3a7af59e83ca..0451533ddde4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -471,8 +471,12 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 1505) &&
(adev->gfx.pfp_fw_version >= 1600) &&
- (adev->gfx.mec_fw_version >= 512))
- adev->gfx.cp_gfx_shadow = true;
+ (adev->gfx.mec_fw_version >= 512)) {
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.cp_gfx_shadow = true;
+ else
+ adev->gfx.cp_gfx_shadow = false;
+ }
break;
default:
adev->gfx.cp_gfx_shadow = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index e1a392bcea70..af5685f4cb34 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -137,14 +137,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
int ret;
int retry_loop;
+ /* Wait for bootloader to signify that it is ready having bit 31 of
+ * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+ * If there is an error in processing command, bits[7:0] will be set.
+ * This is applicable for PSP v13.0.6 and newer.
+ */
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
- /* Wait for bootloader to signify that is
- ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0xffffffff, false);
if (ret == 0)
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 49f40d9f16e8..f5a6f562e2a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1543,11 +1543,7 @@ static bool kfd_ignore_crat(void)
if (ignore_crat)
return true;
-#ifndef KFD_SUPPORT_IOMMU_V2
ret = true;
-#else
- ret = false;
-#endif
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0b3dc754e06b..a53e0757fe64 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -194,11 +194,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
kfd_device_info_set_event_interrupt_class(kfd);
- /* Raven */
- if (gc_version == IP_VERSION(9, 1, 0) ||
- gc_version == IP_VERSION(9, 2, 2))
- kfd->device_info.needs_iommu_device = true;
-
if (gc_version < IP_VERSION(11, 0, 0)) {
/* Navi2x+, Navi1x+ */
if (gc_version == IP_VERSION(10, 3, 6))
@@ -233,10 +228,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
asic_type != CHIP_TONGA)
kfd->device_info.supports_cwsr = true;
- if (asic_type == CHIP_KAVERI ||
- asic_type == CHIP_CARRIZO)
- kfd->device_info.needs_iommu_device = true;
-
if (asic_type != CHIP_HAWAII && !vf)
kfd->device_info.needs_pci_atomics = true;
}
@@ -249,7 +240,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
-#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
gfx_target_version = 70000;
@@ -262,7 +252,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
-#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
gfx_target_version = 70001;
@@ -298,7 +287,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
-#ifdef KFD_SUPPORT_IOMMU_V2
/* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
@@ -306,7 +294,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
-#endif
/* Vega12 */
case IP_VERSION(9, 2, 1):
gfx_target_version = 90004;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2df153828ff4..01192f5abe46 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2538,18 +2538,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
}
switch (dev->adev->asic_type) {
- case CHIP_CARRIZO:
- device_queue_manager_init_vi(&dqm->asic_ops);
- break;
-
case CHIP_KAVERI:
- device_queue_manager_init_cik(&dqm->asic_ops);
- break;
-
case CHIP_HAWAII:
device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
break;
+ case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 61fc62f3e003..4a17bb7c7b27 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu)
const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
gpu_id = kfd_generate_gpu_id(gpu);
- pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ if (gpu->xcp && !gpu->xcp->ddev) {
+ dev_warn(gpu->adev->dev,
+ "Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
+ gpu_id);
+ return 0;
+ } else {
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ }
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0fa739fd6a9c..e5554a36e8c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1638,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
- if (init_data.flags.gpu_vm_support &&
- (amdgpu_sg_display == 0))
- init_data.flags.gpu_vm_support = false;
+ if (init_data.flags.gpu_vm_support)
+ init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 9bc86deac9e8..b885c39bd16b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
- if (!res_pool->funcs->remove_stream_from_ctx ||
+ if (res_pool->funcs->remove_stream_from_ctx &&
res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 20d4d08a6a2f..6966420dfbac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
/* ensure that the panel is detected */
- ASSERT(edp_hpd_high);
+ if (!edp_hpd_high)
+ DC_LOG_DC("%s: wait timed out!\n", __func__);
}
void dce110_edp_power_control(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 4cc8de2627ce..9f2e24398cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index e5b7ef7422b8..50dc83404644 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ce41a8309582..222af2fae745 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1581,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu)
/*
* For SMU 13.0.4/11, PMFW will handle the features disablement properly
- * for gpu reset case. Driver involvement is unnecessary.
+ * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
- if (amdgpu_in_reset(adev)) {
+ if (amdgpu_in_reset(adev) || adev->in_s0ix) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0cda3b276f61..f0800c0c5168 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -588,7 +588,9 @@ err0_out:
return -ENOMEM;
}
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
+static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
+ bool use_metrics_v3,
+ bool use_metrics_v2)
{
struct smu_table_context *smu_table= &smu->smu_table;
SmuMetricsExternal_t *metrics_ext =
@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
uint32_t throttler_status = 0;
int i;
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900)) {
+ if (use_metrics_v3) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
- } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300)) {
+ } else if (use_metrics_v2) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
- *value = sienna_cichlid_get_throttler_status_locked(smu);
+ *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
break;
case METRICS_CURR_FANSPEED:
*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
- gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
+ gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
gpu_metrics->indep_throttle_status =
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
sienna_cichlid_throttler_map);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3d188616ba24..0fb6be11a0cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -332,10 +332,13 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+#if 0
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&pptable->SkuTable.OverDriveLimitsMin;
+#endif
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -347,18 +350,30 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true;
+ /*
+ * We are in the transition to a new OD mechanism.
+ * Disable the OD feature support for SMU13 temporarily.
+ * TODO: get this reverted when new OD mechanism online
+ */
+#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
-
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
+#else
+ smu->od_enabled = false;
+#endif
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
+ smu->adev->pm.no_fan =
+ !(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
return 0;
}
@@ -1140,7 +1155,6 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
- const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
int32_t min_value, max_value;
@@ -1256,7 +1270,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
- (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
+ (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 1ac552142763..dc6104a04dce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -81,9 +81,10 @@
#define EPSILON 1
#define smnPCIE_ESM_CTRL 0x193D0
-#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define MAX_LINK_WIDTH 6
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
@@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
*value = UINT_MAX;
@@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct amdgpu_device *adev = smu->adev;
int ret = 0, inst0, xcc0;
MetricsTable_t *metrics;
+ u16 link_width_level;
inst0 = adev->sdma.instance[0].aid_id;
xcc0 = GET_INST(GC, 0);
@@ -1993,9 +1998,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->average_socket_power =
SMUQ10_TO_UINT(metrics->SocketPower);
- /* Energy is reported in 15.625mJ units */
- gpu_metrics->energy_accumulator =
- SMUQ10_TO_UINT(metrics->SocketEnergyAcc);
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
gpu_metrics->current_gfxclk =
SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
@@ -2017,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->throttle_status = 0;
if (!(adev->flags & AMD_IS_APU)) {
+ link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
+ if (link_width_level > MAX_LINK_WIDTH)
+ link_width_level = 0;
+
gpu_metrics->pcie_link_width =
- smu_v13_0_6_get_current_pcie_link_width_level(smu);
+ DECODE_LANE_WIDTH(link_width_level);
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b1f0937ccade..62f2886ab4df 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -323,10 +323,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *smc_pptable = table_context->driver_pptable;
BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+#if 0
const OverDriveLimits_t * const overdrive_upperlimits =
&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&smc_pptable->SkuTable.OverDriveLimitsMin;
+#endif
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -338,18 +340,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
smu_baco->maco_support = true;
+#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
-
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
+#else
+ smu->od_enabled = false;
+#endif
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 504d51c42f79..aadb396508c5 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+ if (it6505->enable_drv_hold || !it6505->powered)
return IRQ_HANDLED;
+ pm_runtime_get_sync(dev);
+
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
int_status[2] = it6505_read(it6505, INT_STATUS_03);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 5163e5224aad..9663601ce098 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
- MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
- MIPI_DSI_MODE_NO_EOT_PACKET;
+ MIPI_DSI_MODE_VIDEO_HSE;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e0dbd9140726..1f470968ed14 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3456,6 +3456,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
+ connector->base.id, connector->name);
+ }
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3502,27 +3506,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
- switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
- case DRM_EDID_PT_ANALOG_CSYNC:
- case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
- connector->base.id, connector->name);
- mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
- break;
- case DRM_EDID_PT_DIGITAL_CSYNC:
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
- connector->base.id, connector->name);
- mode->flags |= DRM_MODE_FLAG_CSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
- break;
- case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- break;
- }
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}
set_size:
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4ea6507a77e5..baaf0e0feb06 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -623,7 +623,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
int ret;
if (obj->import_attach) {
+ /* Reset both vm_ops and vm_private_data, so we don't end up with
+ * vm_ops pointing to our implementation if the dma-buf backend
+ * doesn't set those fields.
+ */
vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
/* Drop the reference drm_gem_mmap_obj() acquired.*/
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index f0ee9bcf661d..b0c6a2a86f2f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -662,10 +662,24 @@ static const struct intel_display_device_info xe_lpdp_display = {
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};
+/*
+ * Separate detection for no display cases to keep the display id array simple.
+ *
+ * IVB Q requires subvendor and subdevice matching to differentiate from IVB D
+ * GT2 server.
+ */
+static bool has_no_display(struct pci_dev *pdev)
+{
+ static const struct pci_device_id ids[] = {
+ INTEL_IVB_Q_IDS(0),
+ {}
+ };
+
+ return pci_match_id(ids, pdev);
+}
+
#undef INTEL_VGA_DEVICE
-#undef INTEL_QUANTA_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) { id, info }
-#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
static const struct {
u32 devid;
@@ -690,7 +704,6 @@ static const struct {
INTEL_IRONLAKE_M_IDS(&ilk_m_display),
INTEL_SNB_D_IDS(&snb_display),
INTEL_SNB_M_IDS(&snb_display),
- INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */
INTEL_IVB_M_IDS(&ivb_display),
INTEL_IVB_D_IDS(&ivb_display),
INTEL_HSW_IDS(&hsw_display),
@@ -775,6 +788,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
if (has_gmdid)
return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
+ if (has_no_display(pdev)) {
+ drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ return &no_display;
+ }
+
for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
if (intel_display_ids[i].devid == pdev->device)
return intel_display_ids[i].info;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 21f92123c844..67e3aaf9b432 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
&conn_state->base.base);
- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+ intel_panel_init_alloc(&sdvo_connector->base);
return sdvo_connector;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ee9f83af7cf6..477df260ae3a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
ret = slpc_set_param(slpc,
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val);
- if (ret)
+ if (ret) {
guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
val, ERR_PTR(ret));
- else
+ } else {
slpc->ignore_eff_freq = val;
+ /* Set min to RPn when we disable efficient freq */
+ if (val)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ slpc->min_freq);
+ }
+
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
return ret;
@@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
return ret;
if (!slpc->min_freq_softlimit) {
- ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
- if (unlikely(ret))
- return ret;
+ /* Min softlimit is initialized to RPn */
+ slpc->min_freq_softlimit = slpc->min_freq;
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
} else {
return intel_guc_slpc_set_min_freq(slpc,
@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return ret;
}
+ /* Set cached value of ignore efficient freq */
+ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
- /* Set cached value of ignore efficient freq */
- intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index f75c6f09dd2a..622f6eb9a8bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
/* Determine display colour depth for everything except LVDS now,
* DP requires this before mode_valid() is called.
*/
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
nouveau_connector_detect_depth(connector);
/* Find the native mode if this is a digital panel, if we didn't
@@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev,
ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
&nv_connector->conn);
if (ret) {
- kfree(nv_connector);
- return ERR_PTR(ret);
+ goto drm_conn_err;
}
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
@@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev,
if (ret) {
nvif_event_dtor(&nv_connector->hpd);
nvif_conn_dtor(&nv_connector->conn);
- kfree(nv_connector);
- return ERR_PTR(ret);
+ goto drm_conn_err;
}
}
}
@@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev,
drm_connector_register(connector);
return connector;
+
+drm_conn_err:
+ drm_connector_cleanup(connector);
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 40c8ea43c42f..b8ac66b4a2c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -26,6 +26,8 @@
#include "head.h"
#include "ior.h"
+#include <drm/display/drm_dp.h>
+
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
return outp->dp.rates != 0;
}
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+ struct nvkm_i2c_aux *aux = outp->dp.aux;
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+ int ret;
+
+ ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Prior to DP1.3 the bit represented by
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
+ * the true capability of the panel. The only way to check is to
+ * then compare 0000h and 2200h.
+ */
+ if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+ return 0;
+
+ ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
+ if (ret < 0)
+ return ret;
+
+ if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+ OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+ outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
+ return 0;
+ }
+
+ if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
+ return 0;
+
+ memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+ return 0;
+}
+
void
nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
{
@@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
}
- if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
+ if (!nvkm_dp_read_dpcd_caps(outp)) {
const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
const u8 *rate;
int rate_max;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 00dbeda7e346..de161e7a04aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -117,6 +117,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
extern const struct gf100_grctx_func gk110_grctx;
void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
extern const struct gf100_grctx_func gk110b_grctx;
extern const struct gf100_grctx_func gk208_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 94233d0119df..52a234b1ef01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -906,7 +906,9 @@ static void
gk104_grctx_generate_r419f78(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+	/* setting bit 3 disables loads in fp helper invocations; we need it enabled */
+ nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 4391458e1fb2..3acdd9eeb74a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
}
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	/* setting bit 3 disables loads in fp helper invocations; we need it enabled */
+ nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
const struct gf100_grctx_func
gk110_grctx = {
.main = gf100_grctx_generate_main,
@@ -854,4 +863,5 @@ gk110_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 7b9a34f9ec3c..5597e87624ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -103,4 +103,5 @@ gk110b_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index c78d07a8bb7d..612656496541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -568,4 +568,5 @@ gk208_grctx = {
.dist_skip_table = gf117_grctx_generate_dist_skip_table,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index beac66eb2a80..9906974ac3f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -988,4 +988,5 @@ gm107_grctx = {
.r406500 = gm107_grctx_generate_r406500,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r419e00 = gm107_grctx_generate_r419e00,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 3b6c8100a242..a7775aa18541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -206,19 +206,6 @@ tu102_gr_av_to_init_veid(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
return gk20a_gr_av_to_init_(blob, 64, 0x00100000, ppack);
}
-int
-tu102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
-{
- int ret;
-
- ret = gm200_gr_load(gr, ver, fwif);
- if (ret)
- return ret;
-
- return gk20a_gr_load_net(gr, "gr/", "sw_veid_bundle_init", ver, tu102_gr_av_to_init_veid,
- &gr->bundle_veid);
-}
-
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 8f4f137a2af6..213008499caa 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -404,38 +404,30 @@ static int jdi_panel_add(struct jdi_panel *jdi)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
jdi->supplies);
- if (ret < 0) {
- dev_err(dev, "failed to init regulator, ret=%d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to init regulator, ret=%d\n", ret);
jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(jdi->enable_gpio)) {
- ret = PTR_ERR(jdi->enable_gpio);
- dev_err(dev, "cannot get enable-gpio %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(jdi->enable_gpio),
+ "cannot get enable-gpio %d\n", ret);
}
jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(jdi->reset_gpio)) {
- ret = PTR_ERR(jdi->reset_gpio);
- dev_err(dev, "cannot get reset-gpios %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(jdi->reset_gpio),
+ "cannot get reset-gpios %d\n", ret);
jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
- if (IS_ERR(jdi->dcdc_en_gpio)) {
- ret = PTR_ERR(jdi->dcdc_en_gpio);
- dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->dcdc_en_gpio))
+ return dev_err_probe(dev, PTR_ERR(jdi->dcdc_en_gpio),
+ "cannot get dcdc-en-gpio %d\n", ret);
jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
- if (IS_ERR(jdi->backlight)) {
- ret = PTR_ERR(jdi->backlight);
- dev_err(dev, "failed to register backlight %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->backlight))
+ return dev_err_probe(dev, PTR_ERR(jdi->backlight),
+ "failed to register backlight %d\n", ret);
drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index aaba36b3a674..b38d0e95cd54 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -999,21 +999,21 @@ static const struct panel_desc auo_g104sn02 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode auo_g121ean01_mode = {
- .clock = 66700,
- .hdisplay = 1280,
- .hsync_start = 1280 + 58,
- .hsync_end = 1280 + 58 + 8,
- .htotal = 1280 + 58 + 8 + 70,
- .vdisplay = 800,
- .vsync_start = 800 + 6,
- .vsync_end = 800 + 6 + 4,
- .vtotal = 800 + 6 + 4 + 10,
+static const struct display_timing auo_g121ean01_timing = {
+ .pixelclock = { 60000000, 74400000, 90000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 20, 50, 100 },
+ .hback_porch = { 20, 50, 100 },
+ .hsync_len = { 30, 100, 200 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 2, 10, 25 },
+ .vback_porch = { 2, 10, 25 },
+ .vsync_len = { 4, 18, 50 },
};
static const struct panel_desc auo_g121ean01 = {
- .modes = &auo_g121ean01_mode,
- .num_modes = 1,
+ .timings = &auo_g121ean01_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 261,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index ea993d7162e8..307a890fde13 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
u32 domain,
size_t size,
struct qxl_surface *surf,
- struct qxl_bo **qobj,
+ struct drm_gem_object **gobj,
uint32_t *handle);
void qxl_gem_object_free(struct drm_gem_object *gobj);
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d636ba685451..17df5c7ccf69 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
{
struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
+ struct drm_gem_object *gobj;
uint32_t handle;
int r;
struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_CPU,
- args->size, &surf, &qobj,
+ args->size, &surf, &gobj,
&handle);
if (r)
return r;
+ qobj = gem_to_qxl_bo(gobj);
qobj->is_dumb = true;
+ drm_gem_object_put(gobj);
args->pitch = pitch;
args->handle = handle;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a08da0bd9098..fc5e3763c359 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
return 0;
}
+/*
+ * If the caller passed a valid gobj pointer, it is responsible for calling
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, it is handled internally.
+ */
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
size_t size,
struct qxl_surface *surf,
- struct qxl_bo **qobj,
+ struct drm_gem_object **gobj,
uint32_t *handle)
{
- struct drm_gem_object *gobj;
int r;
+ struct drm_gem_object *local_gobj;
- BUG_ON(!qobj);
BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0,
domain,
false, false, surf,
- &gobj);
+ &local_gobj);
if (r)
return -ENOMEM;
- r = drm_gem_handle_create(file_priv, gobj, handle);
+ r = drm_gem_handle_create(file_priv, local_gobj, handle);
if (r)
return r;
- /* drop reference from allocate - handle holds it now */
- *qobj = gem_to_qxl_bo(gobj);
- drm_gem_object_put(gobj);
+
+ if (gobj)
+ *gobj = local_gobj;
+ else
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(local_gobj);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 30f58b21372a..dd0f834d881c 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
- struct qxl_bo *qobj;
uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM;
@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
domain,
qxl_alloc->size,
NULL,
- &qobj, &handle);
+ NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
- struct qxl_bo *qobj;
int handle;
int ret;
int size, actual_stride;
@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
QXL_GEM_DOMAIN_SURFACE,
size,
&surf,
- &qobj, &handle);
+ NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a530ecc4d207..bf34498c1b6d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -833,12 +833,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* need align with 2 pixel.
*/
if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
- DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
+ DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
- DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+ DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
return -EINVAL;
}
@@ -846,7 +846,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct vop *vop = to_vop(crtc);
if (!vop->data->afbc) {
- DRM_ERROR("vop does not support AFBC\n");
+ DRM_DEBUG_KMS("vop does not support AFBC\n");
return -EINVAL;
}
@@ -855,15 +855,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
return ret;
if (new_plane_state->src.x1 || new_plane_state->src.y1) {
- DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
- new_plane_state->src.x1,
- new_plane_state->src.y1, fb->offsets[0]);
+ DRM_DEBUG_KMS("AFBC does not support offset display, " \
+ "xpos=%d, ypos=%d, offset=%d\n",
+ new_plane_state->src.x1, new_plane_state->src.y1,
+ fb->offsets[0]);
return -EINVAL;
}
if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
- DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
- new_plane_state->rotation);
+ DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
+ new_plane_state->rotation);
return -EINVAL;
}
}
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index a997dbcb563f..023807859be7 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -13,9 +13,11 @@
#include <linux/crc16.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
@@ -63,6 +65,8 @@ static const char *const aqc_device_names[] = {
#define CTRL_REPORT_ID 0x03
#define AQUAERO_CTRL_REPORT_ID 0x0b
+#define CTRL_REPORT_DELAY 200 /* ms */
+
/* The HID report that the official software always sends
* after writing values, currently same for all devices
*/
@@ -527,6 +531,9 @@ struct aqc_data {
int secondary_ctrl_report_size;
u8 *secondary_ctrl_report;
+ ktime_t last_ctrl_report_op;
+ int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */
+
int buffer_size;
u8 *buffer;
int checksum_start;
@@ -611,17 +618,35 @@ static int aqc_aquastreamxt_convert_fan_rpm(u16 val)
return 0;
}
+static void aqc_delay_ctrl_report(struct aqc_data *priv)
+{
+ /*
+ * If previous read or write is too close to this one, delay the current operation
+ * to give the device enough time to process the previous one.
+ */
+ if (priv->ctrl_report_delay) {
+ s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op);
+
+ if (delta < priv->ctrl_report_delay)
+ msleep(priv->ctrl_report_delay - delta);
+ }
+}
+
/* Expects the mutex to be locked */
static int aqc_get_ctrl_data(struct aqc_data *priv)
{
int ret;
+ aqc_delay_ctrl_report(priv);
+
memset(priv->buffer, 0x00, priv->buffer_size);
ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
if (ret < 0)
ret = -ENODATA;
+ priv->last_ctrl_report_op = ktime_get();
+
return ret;
}
@@ -631,6 +656,8 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
int ret;
u16 checksum;
+ aqc_delay_ctrl_report(priv);
+
/* Checksum is not needed for Aquaero */
if (priv->kind != aquaero) {
/* Init and xorout value for CRC-16/USB is 0xffff */
@@ -646,12 +673,16 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret < 0)
- return ret;
+ goto record_access_and_ret;
/* The official software sends this report after every change, so do it here as well */
ret = hid_hw_raw_request(priv->hdev, priv->secondary_ctrl_report_id,
priv->secondary_ctrl_report, priv->secondary_ctrl_report_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+record_access_and_ret:
+ priv->last_ctrl_report_op = ktime_get();
+
return ret;
}
@@ -1524,6 +1555,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->buffer_size = AQUAERO_CTRL_REPORT_SIZE;
priv->temp_ctrl_offset = AQUAERO_TEMP_CTRL_OFFSET;
+ priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->temp_label = label_temp_sensors;
priv->virtual_temp_label = label_virtual_temp_sensors;
@@ -1547,6 +1579,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->temp_ctrl_offset = D5NEXT_TEMP_CTRL_OFFSET;
priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
+ priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
@@ -1597,6 +1630,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->temp_ctrl_offset = OCTO_TEMP_CTRL_OFFSET;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
+ priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
@@ -1624,6 +1658,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->temp_ctrl_offset = QUADRO_TEMP_CTRL_OFFSET;
priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
+ priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->flow_pulses_ctrl_offset = QUADRO_FLOW_PULSES_CTRL_OFFSET;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
index fa5070ae26bc..7c5f4b10a7c1 100644
--- a/drivers/hwmon/pmbus/bel-pfe.c
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -17,12 +17,13 @@
enum chips {pfe1100, pfe3000};
/*
- * Disable status check for pfe3000 devices, because some devices report
- * communication error (invalid command) for VOUT_MODE command (0x20)
- * although correct VOUT_MODE (0x16) is returned: it leads to incorrect
- * exponent in linear mode.
+ * Disable status check because some devices report communication error
+ * (invalid command) for VOUT_MODE command (0x20) although the correct
+ * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear
+ * mode.
+ * This affects both pfe3000 and pfe1100.
*/
-static struct pmbus_platform_data pfe3000_plat_data = {
+static struct pmbus_platform_data pfe_plat_data = {
.flags = PMBUS_SKIP_STATUS_CHECK,
};
@@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client)
int model;
model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
+ client->dev.platform_data = &pfe_plat_data;
/*
* PFE3000-12-069RA devices may not stay in page 0 during device
* probe which leads to probe failure (read status word failed).
* So let's set the device to page 0 at the beginning.
*/
- if (model == pfe3000) {
- client->dev.platform_data = &pfe3000_plat_data;
+ if (model == pfe3000)
i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
- }
return pmbus_do_probe(client, &pfe_driver_info[model]);
}
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 8685e0b58a83..7bc3ebfe8081 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -62,7 +62,6 @@
#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
-#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
#define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */
#define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
#define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */
@@ -91,6 +90,7 @@
/* Configuration Register Bit Designations (AD7192_REG_CONF) */
#define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
+#define AD7192_CONF_ACX BIT(22) /* AC excitation enable(AD7195 only) */
#define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
#define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */
#define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */
@@ -472,7 +472,7 @@ static ssize_t ad7192_show_ac_excitation(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- return sysfs_emit(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
+ return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX));
}
static ssize_t ad7192_show_bridge_switch(struct device *dev,
@@ -513,13 +513,13 @@ static ssize_t ad7192_set(struct device *dev,
ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon);
break;
- case AD7192_REG_MODE:
+ case AD7192_REG_CONF:
if (val)
- st->mode |= AD7192_MODE_ACX;
+ st->conf |= AD7192_CONF_ACX;
else
- st->mode &= ~AD7192_MODE_ACX;
+ st->conf &= ~AD7192_CONF_ACX;
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
break;
default:
ret = -EINVAL;
@@ -579,12 +579,11 @@ static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
ad7192_show_ac_excitation, ad7192_set,
- AD7192_REG_MODE);
+ AD7192_REG_CONF);
static struct attribute *ad7192_attributes[] = {
&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
- &iio_dev_attr_ac_excitation_en.dev_attr.attr,
NULL
};
@@ -595,6 +594,7 @@ static const struct attribute_group ad7192_attribute_group = {
static struct attribute *ad7195_attributes[] = {
&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
+ &iio_dev_attr_ac_excitation_en.dev_attr.attr,
NULL
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 213526c1592f..aea83f369437 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = {
enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
+ const char *name;
u16 config_default;
int calibration_value;
int shunt_voltage_lsb; /* nV */
@@ -155,6 +156,7 @@ struct ina2xx_chip_info {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
+ .name = "ina219",
.config_default = INA219_CONFIG_DEFAULT,
.calibration_value = 4096,
.shunt_voltage_lsb = 10000,
@@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = {
.chip_id = ina219,
},
[ina226] = {
+ .name = "ina226",
.config_default = INA226_CONFIG_DEFAULT,
.calibration_value = 2048,
.shunt_voltage_lsb = 2500,
@@ -996,7 +999,7 @@ static int ina2xx_probe(struct i2c_client *client)
/* Patch the current config register with default. */
val = chip->config->config_default;
- if (id->driver_data == ina226) {
+ if (type == ina226) {
ina226_set_average(chip, INA226_DEFAULT_AVG, &val);
ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val);
ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val);
@@ -1015,7 +1018,7 @@ static int ina2xx_probe(struct i2c_client *client)
}
indio_dev->modes = INDIO_DIRECT_MODE;
- if (id->driver_data == ina226) {
+ if (type == ina226) {
indio_dev->channels = ina226_channels;
indio_dev->num_channels = ARRAY_SIZE(ina226_channels);
indio_dev->info = &ina226_info;
@@ -1024,7 +1027,7 @@ static int ina2xx_probe(struct i2c_client *client)
indio_dev->num_channels = ARRAY_SIZE(ina219_channels);
indio_dev->info = &ina219_info;
}
- indio_dev->name = id->name;
+ indio_dev->name = id ? id->name : chip->config->name;
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
&ina2xx_setup_ops);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index af6bfcc19075..eb78a6f17fd0 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -916,12 +916,6 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
goto err_vref;
}
- ret = clk_prepare_enable(priv->core_clk);
- if (ret) {
- dev_err(dev, "failed to enable core clk\n");
- goto err_core_clk;
- }
-
regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
@@ -948,8 +942,6 @@ err_adc_clk:
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
MESON_SAR_ADC_REG3_ADC_EN, 0);
meson_sar_adc_set_bandgap(indio_dev, false);
- clk_disable_unprepare(priv->core_clk);
-err_core_clk:
regulator_disable(priv->vref);
err_vref:
meson_sar_adc_unlock(indio_dev);
@@ -977,8 +969,6 @@ static void meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
meson_sar_adc_set_bandgap(indio_dev, false);
- clk_disable_unprepare(priv->core_clk);
-
regulator_disable(priv->vref);
if (!ret)
@@ -1211,7 +1201,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(priv->clkin))
return dev_err_probe(dev, PTR_ERR(priv->clkin), "failed to get clkin\n");
- priv->core_clk = devm_clk_get(dev, "core");
+ priv->core_clk = devm_clk_get_enabled(dev, "core");
if (IS_ERR(priv->core_clk))
return dev_err_probe(dev, PTR_ERR(priv->core_clk), "failed to get core clk\n");
@@ -1294,15 +1284,26 @@ static int meson_sar_adc_remove(struct platform_device *pdev)
static int meson_sar_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
meson_sar_adc_hw_disable(indio_dev);
+ clk_disable_unprepare(priv->core_clk);
+
return 0;
}
static int meson_sar_adc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->core_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable core clk\n");
+ return ret;
+ }
return meson_sar_adc_hw_enable(indio_dev);
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 943e9e14d1e9..b72d39fc2434 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -253,7 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
platform_set_drvdata(pdev, indio_dev);
state->ec = ec->ec_dev;
- state->msg = devm_kzalloc(&pdev->dev,
+ state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
max((u16)sizeof(struct ec_params_motion_sense),
state->ec->max_response), GFP_KERNEL);
if (!state->msg)
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 9bf8337806fc..8c8e0bbfc99f 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -344,9 +344,12 @@ static int admv1013_update_quad_filters(struct admv1013_state *st)
static int admv1013_update_mixer_vgate(struct admv1013_state *st)
{
- unsigned int vcm, mixer_vgate;
+ unsigned int mixer_vgate;
+ int vcm;
vcm = regulator_get_voltage(st->reg);
+ if (vcm < 0)
+ return vcm;
if (vcm < 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 6a18b363cf73..b6e6b1df8a61 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2687,7 +2687,7 @@ unknown_format:
static int lsm6dsx_get_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation)
{
- return false;
+ return -EOPNOTSUPP;
}
#endif
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index c117f50d0cf3..adcba832e6fa 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1888,7 +1888,7 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode = NULL;
int ret;
if (!indio_dev->info)
@@ -1899,7 +1899,8 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
/* If the calling driver did not initialize firmware node, do it here */
if (dev_fwnode(&indio_dev->dev))
fwnode = dev_fwnode(&indio_dev->dev);
- else
+ /* The default dummy IIO device has no parent */
+ else if (indio_dev->dev.parent)
fwnode = dev_fwnode(indio_dev->dev.parent);
device_set_node(&indio_dev->dev, fwnode);
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
index 489902bed7f0..b50bf8973d9a 100644
--- a/drivers/iio/light/rohm-bu27008.c
+++ b/drivers/iio/light/rohm-bu27008.c
@@ -190,7 +190,7 @@ static const struct iio_itime_sel_mul bu27008_itimes[] = {
.address = BU27008_REG_##data##_LO, \
.scan_index = BU27008_##color, \
.scan_type = { \
- .sign = 's', \
+ .sign = 'u', \
.realbits = 16, \
.storagebits = 16, \
.endianness = IIO_LE, \
@@ -633,7 +633,7 @@ static int bu27008_try_find_new_time_gain(struct bu27008_data *data, int val,
for (i = 0; i < data->gts.num_itime; i++) {
new_time_sel = data->gts.itime_table[i].sel;
ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
- new_time_sel, val, val2 * 1000, gain_sel);
+ new_time_sel, val, val2, gain_sel);
if (!ret)
break;
}
@@ -662,7 +662,7 @@ static int bu27008_set_scale(struct bu27008_data *data,
goto unlock_out;
ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
- val, val2 * 1000, &gain_sel);
+ val, val2, &gain_sel);
if (ret) {
ret = bu27008_try_find_new_time_gain(data, val, val2, &gain_sel);
if (ret)
@@ -677,6 +677,21 @@ unlock_out:
return ret;
}
+static int bu27008_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
static int bu27008_write_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -756,6 +771,7 @@ static int bu27008_update_scan_mode(struct iio_dev *idev,
static const struct iio_info bu27008_info = {
.read_raw = &bu27008_read_raw,
.write_raw = &bu27008_write_raw,
+ .write_raw_get_fmt = &bu27008_write_raw_get_fmt,
.read_avail = &bu27008_read_avail,
.update_scan_mode = bu27008_update_scan_mode,
.validate_trigger = iio_validate_own_trigger,
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index e63ef5789cde..bf3de853a811 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -575,7 +575,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
return -EINVAL;
if (chan == BU27034_CHAN_ALS) {
- if (val == 0 && val2 == 1000)
+ if (val == 0 && val2 == 1000000)
return 0;
return -EINVAL;
@@ -587,7 +587,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
goto unlock_out;
ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
- val, val2 * 1000, &gain_sel);
+ val, val2, &gain_sel);
if (ret) {
/*
* Could not support scale with given time. Need to change time.
@@ -624,7 +624,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
/* Can we provide requested scale with this time? */
ret = iio_gts_find_gain_sel_for_scale_using_time(
- &data->gts, new_time_sel, val, val2 * 1000,
+ &data->gts, new_time_sel, val, val2,
&gain_sel);
if (ret)
continue;
@@ -1217,6 +1217,21 @@ static int bu27034_read_raw(struct iio_dev *idev,
}
}
+static int bu27034_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
static int bu27034_write_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -1267,6 +1282,7 @@ static int bu27034_read_avail(struct iio_dev *idev,
static const struct iio_info bu27034_info = {
.read_raw = &bu27034_read_raw,
.write_raw = &bu27034_write_raw,
+ .write_raw_get_fmt = &bu27034_write_raw_get_fmt,
.read_avail = &bu27034_read_avail,
};
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 755a9c57db6f..f9ab671c8eda 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
dma_addr_t mask;
int i;
+ umem->iova = va = virt;
+
if (umem->is_odp) {
unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);
@@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
- umem->iova = va = virt;
/* The best result is the smallest page size that results in the minimum
* number of required pages. Compute the largest page size that could
* work based on VA address bits that don't change.
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b42166fe7454..63e98e2d3596 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1253,6 +1253,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
if (rc) {
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
return -EINVAL;
}
@@ -1526,8 +1528,8 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
}
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
- ib_dealloc_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev);
+ ib_dealloc_device(&rdev->ibdev);
skip_remove:
mutex_unlock(&bnxt_re_mutex);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 5fd8f7c90bb0..739d942761d1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -819,6 +819,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
}
memset((u8 *)dpit->tbl, 0xFF, bytes);
+ mutex_init(&res->dpi_tbl_lock);
dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
return 0;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9dbb89e9f4af..baaa4406d5e6 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12307,6 +12307,7 @@ static void free_cntrs(struct hfi1_devdata *dd)
if (dd->synth_stats_timer.function)
del_timer_sync(&dd->synth_stats_timer);
+ cancel_work_sync(&dd->update_cntr_work);
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 8f385f9c2dd3..d5f2a6b5376b 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -83,6 +83,11 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
temp = agg_peak[bucket] * bcm->vote_scale;
bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
+
+ if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
+ bcm->vote_x[bucket] = 0;
+ bcm->vote_y[bucket] = bcm->enable_mask;
+ }
}
if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 04391c1ba465..7843d8864d6b 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -81,6 +81,7 @@ struct qcom_icc_node {
* @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
* @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
* @vote_scale: scaling factor for vote_x and vote_y
+ * @enable_mask: optional mask to send as vote instead of vote_x/vote_y
* @dirty: flag used to indicate whether the bcm needs to be committed
* @keepalive: flag used to indicate whether a keepalive is required
* @aux_data: auxiliary data used when calculating threshold values and
@@ -97,6 +98,7 @@ struct qcom_icc_bcm {
u64 vote_x[QCOM_ICC_NUM_BUCKETS];
u64 vote_y[QCOM_ICC_NUM_BUCKETS];
u64 vote_scale;
+ u32 enable_mask;
bool dirty;
bool keepalive;
struct bcm_db aux_data;
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index da21cc31a580..f56538669de0 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -1873,6 +1873,7 @@ static struct qcom_icc_node srvc_snoc = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index 2d7a8e7b85ec..e64c214b4020 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1337,6 +1337,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -1349,6 +1350,7 @@ static struct qcom_icc_bcm bcm_ce0 = {
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
+ .enable_mask = 0x1,
.keepalive = true,
.num_nodes = 55,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
@@ -1383,6 +1385,7 @@ static struct qcom_icc_bcm bcm_cn0 = {
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
};
@@ -1403,6 +1406,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 12,
.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
&qnm_camnoc_sf, &qnm_mdp,
@@ -1445,6 +1449,7 @@ static struct qcom_icc_bcm bcm_sh0 = {
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 7,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
&qnm_nsp_gemnoc, &qnm_pcie,
@@ -1461,6 +1466,7 @@ static struct qcom_icc_bcm bcm_sn0 = {
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
+ .enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qhm_gic, &qxm_pimem,
&xm_gic, &qns_gemnoc_gc },
@@ -1492,6 +1498,7 @@ static struct qcom_icc_bcm bcm_sn7 = {
static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
+ .enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
@@ -1510,6 +1517,7 @@ static struct qcom_icc_bcm bcm_mm0_disp = {
static struct qcom_icc_bcm bcm_mm1_disp = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mdp_disp, &qnm_rot_disp,
&qns_mem_noc_sf_disp },
@@ -1523,6 +1531,7 @@ static struct qcom_icc_bcm bcm_sh0_disp = {
static struct qcom_icc_bcm bcm_sh1_disp = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &qnm_pcie_disp },
};
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index d823ba988ef6..0864ed285375 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -1473,6 +1473,7 @@ static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -1485,6 +1486,7 @@ static struct qcom_icc_bcm bcm_ce0 = {
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
+ .enable_mask = 0x1,
.keepalive = true,
.num_nodes = 54,
.nodes = { &qsm_cfg, &qhs_ahb2phy0,
@@ -1524,6 +1526,7 @@ static struct qcom_icc_bcm bcm_cn1 = {
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
};
@@ -1549,6 +1552,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 8,
.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
&qnm_camnoc_sf, &qnm_vapss_hcp,
@@ -1589,6 +1593,7 @@ static struct qcom_icc_bcm bcm_sh0 = {
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 13,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
&chm_apps, &qnm_gpu,
@@ -1608,6 +1613,7 @@ static struct qcom_icc_bcm bcm_sn0 = {
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qhm_gic, &xm_gic,
&qns_gemnoc_gc },
@@ -1633,6 +1639,7 @@ static struct qcom_icc_bcm bcm_sn7 = {
static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
+ .enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
@@ -1657,12 +1664,14 @@ static struct qcom_icc_bcm bcm_sh0_disp = {
static struct qcom_icc_bcm bcm_sh1_disp = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
};
static struct qcom_icc_bcm bcm_acv_cam_ife_0 = {
.name = "ACV",
+ .enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_0 },
};
@@ -1681,6 +1690,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = {
static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0,
&qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 },
@@ -1694,6 +1704,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = {
static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0,
&qnm_pcie_cam_ife_0 },
@@ -1701,6 +1712,7 @@ static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
static struct qcom_icc_bcm bcm_acv_cam_ife_1 = {
.name = "ACV",
+ .enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_1 },
};
@@ -1719,6 +1731,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = {
static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1,
&qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 },
@@ -1732,6 +1745,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = {
static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1,
&qnm_pcie_cam_ife_1 },
@@ -1739,6 +1753,7 @@ static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
static struct qcom_icc_bcm bcm_acv_cam_ife_2 = {
.name = "ACV",
+ .enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_2 },
};
@@ -1757,6 +1772,7 @@ static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = {
static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2,
&qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 },
@@ -1770,6 +1786,7 @@ static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = {
static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2,
&qnm_pcie_cam_ife_2 },
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index d676cf63a966..3dae5e3a1697 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -195,7 +195,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
}
}
- if (option->force_clkreq_0)
+ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index cfebad51d1d8..f4ab09439da7 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -435,17 +435,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}
static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
@@ -476,17 +469,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 91d240dd68fa..47ab72a43256 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -327,12 +327,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
}
}
-
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.
*/
- if (option->force_clkreq_0)
+ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 9b42b20a3e5a..79b18f6f73a8 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}
static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
@@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
rts5260_init_hw(pcr);
- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
return 0;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index b1e76030cafd..94af6bf8a25a 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -498,17 +498,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}
static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
u32 val;
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
@@ -554,17 +547,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 32b7783e9d4f..a3f4b52bb159 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1326,8 +1326,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- if (pcr->aspm_mode == ASPM_MODE_REG)
+ if (pcr->aspm_mode == ASPM_MODE_REG) {
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+ }
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
diff --git a/drivers/misc/tps6594-esm.c b/drivers/misc/tps6594-esm.c
index b488f704f104..05e2c151e632 100644
--- a/drivers/misc/tps6594-esm.c
+++ b/drivers/misc/tps6594-esm.c
@@ -13,6 +13,8 @@
#include <linux/mfd/tps6594.h>
+#define TPS6594_DEV_REV_1 0x08
+
static irqreturn_t tps6594_esm_isr(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
@@ -32,11 +34,26 @@ static int tps6594_esm_probe(struct platform_device *pdev)
{
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
+ unsigned int rev;
int irq;
int ret;
int i;
- for (i = 0 ; i < pdev->num_resources ; i++) {
+ /*
+ * Due to a bug in revision 1 of the PMIC, the GPIO3 used for the
+ * SoC ESM function is used to power the load switch instead.
+ * As a consequence, ESM cannot be used on those PMICs.
+ * Check the version and return an error in case of revision 1.
+ */
+ ret = regmap_read(tps->regmap, TPS6594_REG_DEV_REV, &rev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read PMIC revision\n");
+ if (rev == TPS6594_DEV_REV_1)
+ return dev_err_probe(dev, -ENODEV,
+ "ESM not supported for revision 1 PMIC\n");
+
+ for (i = 0; i < pdev->num_resources; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
return dev_err_probe(dev, irq, "Failed to get %s irq\n",
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index bce9c9e43752..52a99d8bada0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2952,6 +2952,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
+ /* If the switch has just been reset and has not yet completed
+ * loading the EEPROM, asserting reset may interrupt the I2C
+ * transaction mid-byte, causing the first EEPROM read after the
+ * reset to come from the wrong location, so the switch boots in
+ * the wrong mode and is left inoperable.
+ */
+ mv88e6xxx_g1_wait_eeprom_done(chip);
+
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 392ec09a1d8a..3e4fb3c3e834 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1793,11 +1793,9 @@ static int b44_nway_reset(struct net_device *dev)
b44_readphy(bp, MII_BMCR, &bmcr);
b44_readphy(bp, MII_BMCR, &bmcr);
r = -EINVAL;
- if (bmcr & BMCR_ANENABLE) {
- b44_writephy(bp, MII_BMCR,
- bmcr | BMCR_ANRESTART);
- r = 0;
- }
+ if (bmcr & BMCR_ANENABLE)
+ r = b44_writephy(bp, MII_BMCR,
+ bmcr | BMCR_ANRESTART);
spin_unlock_irq(&bp->lock);
return r;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b61566afb2f4..31f664ee4d77 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5193,6 +5193,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
unsigned int q;
int err;
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_exit(bp->sgmii_phy);
+
if (!netif_running(netdev))
return 0;
@@ -5253,7 +5256,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
- phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5283,6 +5285,9 @@ static int __maybe_unused macb_resume(struct device *dev)
unsigned int q;
int err;
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
if (!netif_running(netdev))
return 0;
@@ -5343,8 +5348,6 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
- if (!device_may_wakeup(&bp->dev->dev))
- phy_init(bp->sgmii_phy);
phylink_start(bp->phylink);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 4bb300fbb777..07a46adeab38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -210,11 +210,11 @@ read_nvm_exit:
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
**/
static int i40e_read_nvm_aq(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
- /* We can write only up to 4KB (one sector), in one AQ write */
+ /* We can read only up to 4KB (one sector), in one AQ read */
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write fail error: tried to write %d words, limit is %d.\n",
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
- /* A single write cannot spread over two sectors */
+ /* A single read cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 460ca561819a..a34303ad057d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+ fltr->ip_ver = 4;
break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+ fltr->ip_ver = 4;
break;
case IPV4_USER_FLOW:
fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+ fltr->ip_ver = 4;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+ fltr->ip_ver = 6;
break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
sizeof(struct in6_addr));
fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+ fltr->ip_ver = 6;
break;
case IPV6_USER_FLOW:
memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ fltr->ip_ver = 6;
break;
case ETHER_FLOW:
fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
return -EINVAL;
}
+ err = iavf_validate_fdir_fltr_masks(adapter, fltr);
+ if (err)
+ return err;
+
if (iavf_fdir_is_dup_fltr(adapter, fltr))
return -EEXIST;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 505e82ebafe4..03e774bd2a5b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
}
};
+static const struct in6_addr ipv6_addr_zero_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ }
+};
+
+/**
+ * iavf_validate_fdir_fltr_masks - validate Flow Director filter field masks
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if all masks of packet fields are either full or empty. Returns
+ * an error if at least one mask is partial.
+ */
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_ver == 4) {
+ if (fltr->ip_mask.v4_addrs.src_ip &&
+ fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_mask.v4_addrs.dst_ip &&
+ fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
+ goto partial_mask;
+ } else if (fltr->ip_ver == 6) {
+ if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
+ sizeof(struct in6_addr)) &&
+ memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr)))
+ goto partial_mask;
+
+ if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
+ sizeof(struct in6_addr)) &&
+ memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr)))
+ goto partial_mask;
+
+ if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
+ goto partial_mask;
+ }
+
+ if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
+ goto partial_mask;
+
+ if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
+ goto partial_mask;
+
+ if (fltr->ip_mask.l4_header &&
+ fltr->ip_mask.l4_header != htonl(U32_MAX))
+ goto partial_mask;
+
+ return 0;
+
+partial_mask:
+ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
+ return -EOPNOTSUPP;
+}
+
/**
* iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
* @fltr: Flow Director filter data structure
@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
}
- fltr->ip_ver = 4;
-
return 0;
}
@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
}
- fltr->ip_ver = 6;
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index 33c55c366315..9eb9f73f6adf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
struct virtchnl_fdir_add vc_add_msg;
};
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9a53a5e5d73e..03d0e7559f49 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -574,6 +574,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
{
+ if (ice_is_adq_active(pf)) {
+ dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
+ return -EOPNOTSUPP;
+ }
+
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0f04347eda39..dba81aaf4e91 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8889,6 +8889,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
ice_setup_tc_block_cb,
np, np, true);
case TC_SETUP_QDISC_MQPRIO:
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
if (pf->adev) {
mutex_lock(&pf->adev_mutex);
device_lock(&pf->adev->dev);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 4c6d91a8c83e..17bfd5cdf462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -76,7 +76,7 @@ static int octep_send_mbox_req(struct octep_device *oct,
list_add_tail(&d->list, &oct->ctrl_req_wait_list);
ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
(d->done != 0),
- jiffies + msecs_to_jiffies(500));
+ msecs_to_jiffies(500));
list_del(&d->list);
if (ret == 0 || ret == 1)
return -EAGAIN;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 43eb6e871351..4424de2ffd70 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1038,6 +1038,10 @@ static void octep_device_cleanup(struct octep_device *oct)
{
int i;
+ oct->poll_non_ioq_intr = false;
+ cancel_delayed_work_sync(&oct->intr_poll_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+
dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
for (i = 0; i < OCTEP_MAX_VF; i++) {
@@ -1200,14 +1204,11 @@ static void octep_remove(struct pci_dev *pdev)
if (!oct)
return;
- cancel_work_sync(&oct->tx_timeout_task);
- cancel_work_sync(&oct->ctrl_mbox_task);
netdev = oct->netdev;
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- oct->poll_non_ioq_intr = false;
- cancel_delayed_work_sync(&oct->intr_poll_task);
+ cancel_work_sync(&oct->tx_timeout_task);
octep_device_cleanup(oct);
pci_release_mem_regions(pdev);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 9e8e6184f9e4..ecfe93a479da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -84,6 +84,8 @@ enum mlx5e_xdp_xmit_mode {
* MLX5E_XDP_XMIT_MODE_XSK:
* none.
*/
+#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
+
union mlx5e_xdp_info {
enum mlx5e_xdp_xmit_mode mode;
union {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bc9d5a5bea01..a2ae791538ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1298,11 +1298,13 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of
- * entries of all xmit_modes.
- */
+ int entries;
size_t size;
+ /* upper bound for maximum num of entries of all xmit_modes. */
+ entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS *
+ MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO);
+
size = array_size(sizeof(*xdpi_fifo->xi), entries);
xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index aab7059bf6e9..244cfd470903 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -245,12 +245,20 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
mlx5_lag_is_shared_fdb(dev) &&
mlx5_lag_is_master(dev)) {
struct mlx5_core_dev *peer_dev;
- int i;
+ int i, j;
mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
(!disconnect) ? ft->id : 0);
if (err && !disconnect) {
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
+ if (j < i)
+ mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
+ ns->root_ft->id);
+ else
+ break;
+ }
+
MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
MLX5_SET(set_flow_table_root_in, in, table_id,
ns->root_ft->id);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4b004a728190..99df00c30b8c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -176,6 +176,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
}
#endif
+static int __maybe_unused qede_suspend(struct device *dev)
+{
+ dev_info(dev, "Device does not support suspend operation\n");
+
+ return -EOPNOTSUPP;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
+
static const struct pci_error_handlers qede_err_handler = {
.error_detected = qede_io_error_detected,
};
@@ -190,6 +199,7 @@ static struct pci_driver qede_pci_driver = {
.sriov_configure = qede_sriov_configure,
#endif
.err_handler = &qede_err_handler,
+ .driver.pm = &qede_pm_ops,
};
static struct qed_eth_cb_ops qede_ll_ops = {
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index f3e8ed578c09..6da06931187d 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1194,7 +1194,7 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
net_dev->features |= NETIF_F_HW_TC;
efx->fixed_features |= NETIF_F_HW_TC;
}
- return rc;
+ return 0;
}
int ef100_probe_vf(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 246657222958..039180c61c83 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -2090,10 +2090,10 @@ int efx_init_tc(struct efx_nic *efx)
rc = efx_mae_get_tables(efx);
if (rc)
return rc;
- efx->tc->up = true;
rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
if (rc)
goto out_free;
+ efx->tc->up = true;
return 0;
out_free:
efx_mae_free_tables(efx);
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index e5d642c67a2c..97139c07130f 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -314,15 +314,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
pdev = of_find_device_by_node(pcs_np);
of_node_put(pcs_np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev || !platform_get_drvdata(pdev)) {
+ if (pdev)
+ put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
+ }
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
- if (!miic_port)
+ if (!miic_port) {
+ put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
+ }
miic = platform_get_drvdata(pdev);
device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ put_device(&pdev->dev);
miic_port->miic = miic;
miic_port->port = port - 1;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 59cae0d808aa..04b2e6eeb195 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -542,6 +542,17 @@ static int bcm54xx_resume(struct phy_device *phydev)
return bcm54xx_config_init(phydev);
}
+static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ return -EOPNOTSUPP;
+}
+
+static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ return -EOPNOTSUPP;
+}
+
static int bcm54811_config_init(struct phy_device *phydev)
{
int err, reg;
@@ -1103,6 +1114,8 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm54xx_get_stats,
.probe = bcm54xx_phy_probe,
+ .read_mmd = bcm54810_read_mmd,
+ .write_mmd = bcm54810_write_mmd,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 17cb3e07216a..2ce74593d6e4 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3284,6 +3284,8 @@ static int phy_probe(struct device *dev)
goto out;
}
+ phy_disable_interrupts(phydev);
+
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
* or both of these values
@@ -3401,16 +3403,6 @@ static int phy_remove(struct device *dev)
return 0;
}
-static void phy_shutdown(struct device *dev)
-{
- struct phy_device *phydev = to_phy_device(dev);
-
- if (phydev->state == PHY_READY || !phydev->attached_dev)
- return;
-
- phy_disable_interrupts(phydev);
-}
-
/**
* phy_driver_register - register a phy_driver with the PHY layer
* @new_driver: new phy_driver to register
@@ -3444,7 +3436,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
- new_driver->mdiodrv.driver.shutdown = phy_shutdown;
new_driver->mdiodrv.driver.owner = owner;
new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8243563a40f0..e8b94580194e 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2188,7 +2188,9 @@ static void team_setup(struct net_device *dev)
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
dev->features |= dev->hw_features;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 953f6d8f8db0..8d5e12a5a845 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1081,8 +1081,9 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
err_xdp_ring:
for (i--; i >= start; i--)
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
+ i = end;
err_page_pool:
- for (i = start; i < end; i++) {
+ for (i--; i >= start; i--) {
page_pool_destroy(priv->rq[i].page_pool);
priv->rq[i].page_pool = NULL;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9999f81b78d1..494242bb9cf6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2770,7 +2770,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
vi->ctrl->rss.indirection_table[i] = indir_val;
}
- vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+ vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
vi->ctrl->rss.hash_key_length = vi->rss_key_size;
netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
@@ -4358,8 +4358,6 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
- _virtnet_set_queues(vi, vi->curr_queue_pairs);
-
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -4372,6 +4370,8 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ _virtnet_set_queues(vi, vi->curr_queue_pairs);
+
/* a random MAC address has been assigned, notify the device.
* We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
* because many devices work fine without getting MAC explicitly
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 37b6fa746662..f3a01b79148c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3933,6 +3933,12 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
*/
nvme_mpath_clear_ctrl_paths(ctrl);
+ /*
+ * Unquiesce I/O queues so any pending I/O won't hang, especially
+ * I/O submitted from scan work.
+ */
+ nvme_unquiesce_io_queues(ctrl);
+
/* prevent racing with ns scanning */
flush_work(&ctrl->scan_work);
@@ -3942,10 +3948,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
* removing the namespaces' disks; fail all the queues now to avoid
* potentially having to clean up the failed sync later.
*/
- if (ctrl->state == NVME_CTRL_DEAD) {
+ if (ctrl->state == NVME_CTRL_DEAD)
nvme_mark_namespaces_dead(ctrl);
- nvme_unquiesce_io_queues(ctrl);
- }
/* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 5c3250f36ce7..d39f3219358b 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -786,11 +786,9 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
return 0;
- rcu_read_lock();
req = READ_ONCE(ioucmd->cookie);
if (req && blk_rq_is_poll(req))
ret = blk_rq_poll(req, iob, poll_flags);
- rcu_read_unlock();
return ret;
}
#ifdef CONFIG_NVME_MULTIPATH
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index baf69af7ea78..2f57da12d983 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3402,7 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
- .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d433b2ec07a6..337a624a537c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -883,6 +883,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_cleanup_tagset;
if (!new) {
+ nvme_start_freeze(&ctrl->ctrl);
nvme_unquiesce_io_queues(&ctrl->ctrl);
if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -891,6 +892,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(&ctrl->ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
@@ -940,7 +942,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (ctrl->ctrl.queue_count > 1) {
- nvme_start_freeze(&ctrl->ctrl);
nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 9ce417cd32a7..5b332d9f87fc 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1868,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_cleanup_connect_q;
if (!new) {
+ nvme_start_freeze(ctrl);
nvme_unquiesce_io_queues(ctrl);
if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -1876,6 +1877,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1980,7 +1982,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (ctrl->queue_count <= 1)
return;
nvme_quiesce_admin_queue(ctrl);
- nvme_start_freeze(ctrl);
nvme_quiesce_io_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index bf3405f4289e..8b1dcd537020 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -121,6 +121,8 @@ module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
+struct proc_dir_entry *proc_runway_root __ro_after_init;
+struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
@@ -1968,11 +1970,15 @@ static int __init sba_driver_callback(struct parisc_device *dev)
#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
+ if (!proc_mckinley_root)
+ proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
root = proc_mckinley_root;
break;
case ASTRO_RUNWAY_PORT:
case IKE_MERCED_PORT:
default:
+ if (!proc_runway_root)
+ proc_runway_root = proc_mkdir("bus/runway", NULL);
root = proc_runway_root;
break;
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 8d49bad7f847..0859be86e718 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -179,7 +179,6 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
- depends on BROKEN
select PCI_BRIDGE_EMUL
help
Add support for Marvell EBU PCIe controller. This PCIe controller
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index cf61733bf78d..9952057c8819 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -485,20 +485,15 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
goto err_remove_edma;
- if (dw_pcie_link_up(pci)) {
- dw_pcie_print_link_status(pci);
- } else {
+ if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
goto err_remove_edma;
-
- if (pci->ops && pci->ops->start_link) {
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_stop_link;
- }
}
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
+
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index c87848cd8686..1f2ee71da4da 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -644,20 +644,9 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
-void dw_pcie_print_link_status(struct dw_pcie *pci)
-{
- u32 offset, val;
-
- offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
- val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
-
- dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
- FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
- FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
-}
-
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
@@ -673,7 +662,12 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
return -ETIMEDOUT;
}
- dw_pcie_print_link_status(pci);
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 615660640801..79713ce075cc 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -429,7 +429,6 @@ void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-void dw_pcie_print_link_status(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 328d1e416014..601129772b2d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -498,6 +498,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
acpiphp_native_scan_bridge(dev);
}
} else {
+ LIST_HEAD(add_list);
int max, pass;
acpiphp_rescan_slot(slot);
@@ -511,10 +512,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
+ if (pci_is_root_bus(bus))
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
- pci_assign_unassigned_bridge_resources(bus->self);
+ if (pci_is_root_bus(bus))
+ __pci_bus_assign_resources(bus, &add_list, NULL);
+ else
+ pci_assign_unassigned_bridge_resources(bus->self);
}
acpiphp_sanitize_bus(bus);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index ab69d517a36a..a70e67749be3 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -176,7 +176,8 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
{
- u8 mode, flag = 0;
+ u8 flag = 0;
+ int mode;
int src;
mode = amd_pmf_get_pprof_modes(dev);
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 1f59ac55c5f7..a95004e3d80b 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -335,8 +335,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
node = dev_to_node(&_pci_dev->dev);
if (node == NUMA_NO_NODE) {
- pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
- cpu, bus_no, dev, fn);
+ pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
+ cpu, bus_no, dev, fn);
continue;
}
diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c
index 41676188b373..f360370d5002 100644
--- a/drivers/platform/x86/lenovo-ymc.c
+++ b/drivers/platform/x86/lenovo-ymc.c
@@ -24,6 +24,10 @@ static bool ec_trigger __read_mostly;
module_param(ec_trigger, bool, 0444);
MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events");
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type");
+
static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = {
{
/* Lenovo Yoga 7 14ARB7 */
@@ -35,6 +39,20 @@ static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = {
{ }
};
+static const struct dmi_system_id allowed_chasis_types_dmi_table[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ { }
+};
+
struct lenovo_ymc_private {
struct input_dev *input_dev;
struct acpi_device *ec_acpi_dev;
@@ -111,6 +129,13 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx)
struct input_dev *input_dev;
int err;
+ if (!dmi_check_system(allowed_chasis_types_dmi_table)) {
+ if (force)
+ dev_info(&wdev->dev, "Force loading Lenovo YMC support\n");
+ else
+ return -ENODEV;
+ }
+
ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table);
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 67367f010139..7d33977d9c60 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -62,10 +62,6 @@
#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
-#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0x3c
-#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0x3d
-#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0x3e
-#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0x3f
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
@@ -126,6 +122,10 @@
#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
+#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0xb6
+#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0xb7
+#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0xb8
+#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0xb9
#define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2
#define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
@@ -222,7 +222,7 @@
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
-#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT BIT(4)
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT GENMASK(5, 4)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
@@ -237,7 +237,7 @@
#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0)
-#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(5)
+#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(6)
#define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3)
#define MLXPLAT_CPLD_THERMAL2_PDB_MASK BIT(4)
#define MLXPLAT_CPLD_INTRUSION_MASK BIT(6)
@@ -2356,7 +2356,7 @@ mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_k
u8 action)
{
dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button");
- kernel_halt();
+ kernel_power_off();
return 0;
}
@@ -2475,7 +2475,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = {
.reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
.mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_pwr_events_items_data),
- .inversed = 0,
+ .inversed = 1,
.health = false,
},
{
@@ -2484,7 +2484,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = {
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_L1_CHA_HEALTH_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_health_events_items_data),
- .inversed = 0,
+ .inversed = 1,
.health = false,
.ind = 8,
},
@@ -3677,7 +3677,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
{
.label = "latch_reset",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
- .mask = GENMASK(7, 0) & ~BIT(5),
+ .mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0200,
},
{
@@ -6238,8 +6238,6 @@ static void mlxplat_i2c_mux_topolgy_exit(struct mlxplat_priv *priv)
if (priv->pdev_mux[i])
platform_device_unregister(priv->pdev_mux[i]);
}
-
- mlxplat_post_exit();
}
static int mlxplat_i2c_main_complition_notify(void *handle, int id)
@@ -6369,6 +6367,7 @@ static void __exit mlxplat_exit(void)
pm_power_off = NULL;
mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
+ mlxplat_post_exit();
}
module_exit(mlxplat_exit);
diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c
index ff93986e3d35..f26a3121092f 100644
--- a/drivers/platform/x86/msi-ec.c
+++ b/drivers/platform/x86/msi-ec.c
@@ -27,15 +27,15 @@
#include <linux/seq_file.h>
#include <linux/string.h>
-static const char *const SM_ECO_NAME = "eco";
-static const char *const SM_COMFORT_NAME = "comfort";
-static const char *const SM_SPORT_NAME = "sport";
-static const char *const SM_TURBO_NAME = "turbo";
-
-static const char *const FM_AUTO_NAME = "auto";
-static const char *const FM_SILENT_NAME = "silent";
-static const char *const FM_BASIC_NAME = "basic";
-static const char *const FM_ADVANCED_NAME = "advanced";
+#define SM_ECO_NAME "eco"
+#define SM_COMFORT_NAME "comfort"
+#define SM_SPORT_NAME "sport"
+#define SM_TURBO_NAME "turbo"
+
+#define FM_AUTO_NAME "auto"
+#define FM_SILENT_NAME "silent"
+#define FM_BASIC_NAME "basic"
+#define FM_ADVANCED_NAME "advanced"
static const char * const ALLOWED_FW_0[] __initconst = {
"14C1EMS1.012",
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 2c2abf69f049..8158e3cf5d6d 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -329,6 +329,19 @@ static const struct smi_node cs35l41_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node cs35l56_hda = {
+ .instances = {
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l56-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -337,6 +350,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "CSC3556", (unsigned long)&cs35l56_hda },
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index dfd5ec9f75c9..a0621665a6d2 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -778,9 +778,6 @@ static int da9063_check_xvp_constraints(struct regulator_config *config)
const struct notification_limit *uv_l = &constr->under_voltage_limits;
const struct notification_limit *ov_l = &constr->over_voltage_limits;
- if (!config->init_data) /* No config in DT, pointers will be invalid */
- return 0;
-
/* make sure that only one severity is used to clarify if unchanged, enabled or disabled */
if ((!!uv_l->prot + !!uv_l->err + !!uv_l->warn) > 1) {
dev_err(config->dev, "%s: at most one voltage monitoring severity allowed!\n",
@@ -1031,9 +1028,12 @@ static int da9063_regulator_probe(struct platform_device *pdev)
config.of_node = da9063_reg_matches[id].of_node;
config.regmap = da9063->regmap;
- ret = da9063_check_xvp_constraints(&config);
- if (ret)
- return ret;
+ /* Checking constraints requires init_data from DT. */
+ if (config.init_data) {
+ ret = da9063_check_xvp_constraints(&config);
+ if (ret)
+ return ret;
+ }
regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc,
&config);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index f3b280af0773..cd077b7c4aff 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1068,7 +1068,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"),
RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"),
- RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"),
RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"),
RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"),
RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"),
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index e1e4f9d10887..857be0f3ae5b 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1598,7 +1598,7 @@ NCR_700_intr(int irq, void *dev_id)
printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
#endif
resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
- } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
+ } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index d82de34f6fd7..e51e92f932fa 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.54"
+#define DRV_VERSION "1.6.0.55"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 26dbd347156e..be89ce96df46 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2139,7 +2139,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
bool new_sc)
{
- int ret = SUCCESS;
+ int ret = 0;
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = lr_sc->device,
@@ -2159,9 +2159,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* walk again to check, if IOs are still pending in fw */
if (fnic_is_abts_pending(fnic, lr_sc))
- ret = FAILED;
+ ret = 1;
clean_pending_aborts_end:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: exit status: %d\n", __func__, ret);
return ret;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a62e091894f6..d26941b131fd 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -109,8 +109,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
}
}
-#define LPFC_INVALID_REFTAG ((u32)-1)
-
/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
@@ -978,8 +976,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgpe = scsi_prot_sglist(sc);
lba = scsi_prot_ref_tag(sc);
- if (lba == LPFC_INVALID_REFTAG)
- return 0;
/* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
@@ -1560,8 +1556,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command for pde*/
reftag = scsi_prot_ref_tag(sc);
- if (reftag == LPFC_INVALID_REFTAG)
- goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1723,8 +1717,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = scsi_prot_interval(sc);
reftag = scsi_prot_ref_tag(sc);
- if (reftag == LPFC_INVALID_REFTAG)
- goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1953,8 +1945,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command for pde*/
reftag = scsi_prot_ref_tag(sc);
- if (reftag == LPFC_INVALID_REFTAG)
- goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2154,8 +2144,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = scsi_prot_interval(sc);
reftag = scsi_prot_ref_tag(sc);
- if (reftag == LPFC_INVALID_REFTAG)
- goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2746,8 +2734,6 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
start_ref_tag = scsi_prot_ref_tag(cmd);
- if (start_ref_tag == LPFC_INVALID_REFTAG)
- goto out;
start_app_tag = src->app_tag;
len = sgpe->length;
while (src && protsegcnt) {
@@ -3493,11 +3479,11 @@ err:
scsi_cmnd->sc_data_direction);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "9084 Cannot setup S/G List for HBA"
- "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+ "9084 Cannot setup S/G List for HBA "
+ "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n",
lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
- prot_group_type, num_sge);
+ prot_group_type, num_sge, ret);
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 2a31ddc99dde..7825765c936c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
/*
* Driver module parameters.
@@ -3271,6 +3272,7 @@ static struct pci_driver qedf_pci_driver = {
.probe = qedf_probe,
.remove = qedf_remove,
.shutdown = qedf_shutdown,
+ .suspend = qedf_suspend,
};
static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -4000,6 +4002,22 @@ static void qedf_shutdown(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct qedf_ctx *qedf;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+ return -ENODEV;
+ }
+
+ qedf = pci_get_drvdata(pdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+ return -EPERM;
+}
+
/*
* Recovery handler code
*/
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 450522b204d6..cd0180b1f5b9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static void qedi_recovery_handler(struct work_struct *work);
static void qedi_schedule_hw_err_handler(void *dev,
enum qed_hw_err_type err_type);
+static int qedi_suspend(struct pci_dev *pdev, pm_message_t state);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -1976,8 +1977,9 @@ static int qedi_cpu_offline(unsigned int cpu)
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct qedi_work *work, *tmp;
struct task_struct *thread;
+ unsigned long flags;
- spin_lock_bh(&p->p_work_lock);
+ spin_lock_irqsave(&p->p_work_lock, flags);
thread = p->iothread;
p->iothread = NULL;
@@ -1988,7 +1990,7 @@ static int qedi_cpu_offline(unsigned int cpu)
kfree(work);
}
- spin_unlock_bh(&p->p_work_lock);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
if (thread)
kthread_stop(thread);
return 0;
@@ -2510,6 +2512,22 @@ static void qedi_shutdown(struct pci_dev *pdev)
__qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
}
+static int qedi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct qedi_ctx *qedi;
+
+ if (!pdev) {
+ QEDI_ERR(NULL, "pdev is NULL.\n");
+ return -ENODEV;
+ }
+
+ qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+ return -EPERM;
+}
+
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
@@ -2868,6 +2886,7 @@ static struct pci_driver qedi_pci_driver = {
.remove = qedi_remove,
.shutdown = qedi_shutdown,
.err_handler = &qedi_err_handler,
+ .suspend = qedi_suspend,
};
static int __init qedi_init(void)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 898a0bdf8df6..711252e52d8e 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
return 0;
err_out:
+ put_device(&rc->dev);
list_del(&rc->node);
rd->component_count--;
put_device(component_dev);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 4a6eb1741be0..41f23cd0bfb4 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -406,7 +406,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
size_t length, loff_t *ppos)
{
int host, channel, id, lun;
- char *buffer, *p;
+ char *buffer, *end, *p;
int err;
if (!buf || length > PAGE_SIZE)
@@ -421,10 +421,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
goto out;
err = -EINVAL;
- if (length < PAGE_SIZE)
- buffer[length] = '\0';
- else if (buffer[PAGE_SIZE-1])
- goto out;
+ if (length < PAGE_SIZE) {
+ end = buffer + length;
+ *end = '\0';
+ } else {
+ end = buffer + PAGE_SIZE - 1;
+ if (*end)
+ goto out;
+ }
/*
* Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
@@ -433,10 +437,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
if (!strncmp("scsi add-single-device", buffer, 22)) {
p = buffer + 23;
- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
err = scsi_add_single_device(host, channel, id, lun);
@@ -447,10 +451,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
} else if (!strncmp("scsi remove-single-device", buffer, 25)) {
p = buffer + 26;
- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
err = scsi_remove_single_device(host, channel, id, lun);
}
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 3e2e5783924d..e429ad23c396 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -303,6 +303,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
"Snic Tgt: device_add, with err = %d\n",
ret);
+ put_device(&tgt->dev);
put_device(&snic->shost->shost_gendev);
spin_lock_irqsave(snic->shost->host_lock, flags);
list_del(&tgt->list);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f2823218670a..047ffaf7d42a 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1674,10 +1674,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
*/
static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
-#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
- if (scmnd->device->host->transportt == fc_transport_template)
- return fc_eh_timed_out(scmnd);
-#endif
return SCSI_EH_RESET_TIMER;
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 62b26b7998fd..3fb4553a6442 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1964,6 +1964,8 @@ unlock:
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
+
+ kfree(ev);
}
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 1269f417515b..0dfd1e083994 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -579,7 +579,9 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* uni-directional mode and we don't want to change its TMU
* mode.
*/
- tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
+ ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
+ if (ret)
+ return ret;
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index f8a5e79ed3b4..ab0652d8705a 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -359,7 +359,7 @@ static int ufs_renesas_init(struct ufs_hba *hba)
{
struct ufs_renesas_priv *priv;
- priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 766005d20bae..501e8bc9738e 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -42,6 +42,7 @@ struct usb_conn_info {
struct power_supply_desc desc;
struct power_supply *charger;
+ bool initial_detection;
};
/*
@@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work)
dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
usb_role_string(info->last_role), usb_role_string(role), id, vbus);
- if (info->last_role == role) {
+ if (!info->initial_detection && info->last_role == role) {
dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
return;
}
+ info->initial_detection = false;
+
if (info->last_role == USB_ROLE_HOST && info->vbus)
regulator_disable(info->vbus);
@@ -258,6 +261,7 @@ static int usb_conn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
/* Perform initial detection */
+ info->initial_detection = true;
usb_conn_queue_dwork(info, 0);
return 0;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5fd067151fbf..858fe4c299b7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4455,9 +4455,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
u32 count;
if (pm_runtime_suspended(dwc->dev)) {
+ dwc->pending_events = true;
+ /*
+ * Trigger runtime resume. The get() function will be balanced
+ * after processing the pending events in
+ * dwc3_gadget_process_pending_events().
+ */
pm_runtime_get(dwc->dev);
disable_irq_nosync(dwc->irq_gadget);
- dwc->pending_events = true;
return IRQ_HANDLED;
}
@@ -4718,6 +4723,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
if (dwc->pending_events) {
dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ pm_runtime_put(dwc->dev);
dwc->pending_events = false;
enable_irq(dwc->irq_gadget);
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cd58f2a4e7f3..7d49d8a0b00c 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -822,6 +822,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
* usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
+ * This routine may sleep; it must not be called in interrupt context
+ * (such as from within a gadget driver's disconnect() callback).
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_deactivate(struct usb_gadget *gadget)
@@ -860,6 +863,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
* This routine activates gadget which was previously deactivated with
* usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
*
+ * This routine may sleep; it must not be called in interrupt context.
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_activate(struct usb_gadget *gadget)
@@ -1638,7 +1643,11 @@ static void gadget_unbind_driver(struct device *dev)
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
synchronize_irq(gadget->irq);
+ mutex_unlock(&udc->connect_lock);
+
udc->driver->unbind(gadget);
+
+ mutex_lock(&udc->connect_lock);
usb_gadget_udc_stop_locked(udc);
mutex_unlock(&udc->connect_lock);
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 5e912dd29b4c..115f05a6201a 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data)
rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
command, 0xc0, 0, 1, data, 2);
- usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+ if (rc == USB_STOR_XFER_GOOD)
+ usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
return rc;
}
@@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us)
static int alauda_check_media(struct us_data *us)
{
struct alauda_info *info = (struct alauda_info *) us->extra;
- unsigned char status[2];
+ unsigned char *status = us->iobuf;
+ int rc;
- alauda_get_media_status(us, status);
+ rc = alauda_get_media_status(us, status);
+ if (rc != USB_STOR_XFER_GOOD) {
+ status[0] = 0xF0; /* Pretend there's no media */
+ status[1] = 0;
+ }
/* Check for no media or door open */
if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 66de880b28d0..cdf8261e22db 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -60,6 +60,7 @@ struct dp_altmode {
enum dp_state state;
bool hpd;
+ bool pending_hpd;
struct mutex lock; /* device lock */
struct work_struct work;
@@ -144,8 +145,13 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
- if (!ret)
+ if (!ret) {
dp->state = DP_STATE_CONFIGURE;
+ if (dp->hpd != hpd) {
+ dp->hpd = hpd;
+ dp->pending_hpd = true;
+ }
+ }
} else {
if (dp->hpd != hpd) {
drm_connector_oob_hotplug_event(dp->connector_fwnode);
@@ -161,6 +167,16 @@ static int dp_altmode_configured(struct dp_altmode *dp)
{
sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
+ /*
+ * If the DFP_D/UFP_D sends a change in HPD when first notifying the
+ * DisplayPort driver that it is connected, then we wait until
+ * configuration is complete to signal HPD.
+ */
+ if (dp->pending_hpd) {
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
+ dp->pending_hpd = false;
+ }
return dp_altmode_notify(dp);
}
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 784b9d8107e9..65da61150ba7 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -29,6 +29,7 @@ config TYPEC_MUX_INTEL_PMC
tristate "Intel PMC mux control"
depends on ACPI
depends on INTEL_SCU_IPC
+ select USB_COMMON
select USB_ROLE_SWITCH
help
Driver for USB muxes controlled by Intel PMC FW. Intel PMC FW can
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index 80e580d50129..4d1122d95013 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -463,16 +463,18 @@ static int nb7vpq904m_probe(struct i2c_client *client)
ret = nb7vpq904m_register_bridge(nb7);
if (ret)
- return ret;
+ goto err_disable_gpio;
sw_desc.drvdata = nb7;
sw_desc.fwnode = dev->fwnode;
sw_desc.set = nb7vpq904m_sw_set;
nb7->sw = typec_switch_register(dev, &sw_desc);
- if (IS_ERR(nb7->sw))
- return dev_err_probe(dev, PTR_ERR(nb7->sw),
- "Error registering typec switch\n");
+ if (IS_ERR(nb7->sw)) {
+ ret = dev_err_probe(dev, PTR_ERR(nb7->sw),
+ "Error registering typec switch\n");
+ goto err_disable_gpio;
+ }
retimer_desc.drvdata = nb7;
retimer_desc.fwnode = dev->fwnode;
@@ -480,12 +482,21 @@ static int nb7vpq904m_probe(struct i2c_client *client)
nb7->retimer = typec_retimer_register(dev, &retimer_desc);
if (IS_ERR(nb7->retimer)) {
- typec_switch_unregister(nb7->sw);
- return dev_err_probe(dev, PTR_ERR(nb7->retimer),
- "Error registering typec retimer\n");
+ ret = dev_err_probe(dev, PTR_ERR(nb7->retimer),
+ "Error registering typec retimer\n");
+ goto err_switch_unregister;
}
return 0;
+
+err_switch_unregister:
+ typec_switch_unregister(nb7->sw);
+
+err_disable_gpio:
+ gpiod_set_value(nb7->enable_gpio, 0);
+ regulator_disable(nb7->vcc_supply);
+
+ return ret;
}
static void nb7vpq904m_remove(struct i2c_client *client)
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 829d75ebab42..cc1d83926497 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5349,6 +5349,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, vbus drop expected */
break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, it's OK to receive vbus off events */
+ break;
+
default:
if (port->pwr_role == TYPEC_SINK && port->attached)
tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
@@ -5395,6 +5399,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case SNK_DEBOUNCED:
/*Do nothing, still waiting for VSAFE5V for connect */
break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, it's OK to receive vbus off events */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 25fc4120b618..b53420e874ac 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
struct list_head head;
unsigned long num_directs;
unsigned long num_klms;
+ /* state of dvq mr */
bool initialized;
/* serialize mkey creation and destruction */
@@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
unsigned int asid);
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 03e543229791..5a1971fcd87b 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -489,60 +489,103 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
}
}
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ return;
+
+ prune_iotlb(mvdev);
+}
+
+static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
- mutex_lock(&mr->mkey_mtx);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ return;
+
if (!mr->initialized)
- goto out;
+ return;
- prune_iotlb(mvdev);
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
destroy_dma_mr(mvdev, mr);
mr->initialized = false;
-out:
+}
+
+void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+ mutex_lock(&mr->mkey_mtx);
+
+ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+ _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
+
mutex_unlock(&mr->mkey_mtx);
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb, unsigned int asid)
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
+ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
+}
+
+static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid)
+{
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ return 0;
+
+ return dup_iotlb(mvdev, iotlb);
+}
+
+static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
- if (mr->initialized)
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
return 0;
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
- if (iotlb)
- err = create_user_mr(mvdev, iotlb);
- else
- err = create_dma_mr(mvdev, mr);
+ if (mr->initialized)
+ return 0;
- if (err)
- return err;
- }
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
- err = dup_iotlb(mvdev, iotlb);
- if (err)
- goto out_err;
- }
+ if (err)
+ return err;
mr->initialized = true;
+
+ return 0;
+}
+
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb, unsigned int asid)
+{
+ int err;
+
+ err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
+ if (err)
+ return err;
+
+ err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
+ if (err)
+ goto out_err;
+
return 0;
out_err:
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
- if (iotlb)
- destroy_user_mr(mvdev, mr);
- else
- destroy_dma_mr(mvdev, mr);
- }
+ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
return err;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index bece4df7b8dd..40a03b08d7cf 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2517,7 +2517,15 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
else
ndev->rqt_size = 1;
- ndev->cur_num_vqs = 2 * ndev->rqt_size;
+ /* Device must start with 1 queue pair, as per VIRTIO v1.2 spec, section
+ * 5.1.6.5.5 "Device operation in multiqueue mode":
+ *
+ * Multiqueue is disabled by default.
+ * The driver enables multiqueue by sending a command using class
+ * VIRTIO_NET_CTRL_MQ. The command selects the mode of multiqueue
+ * operation, as follows: ...
+ */
+ ndev->cur_num_vqs = 2;
update_cvq_info(mvdev);
return err;
@@ -2636,7 +2644,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
goto err_mr;
teardown_driver(ndev);
- mlx5_vdpa_destroy_mr(mvdev);
+ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
if (err)
goto err_mr;
@@ -2652,7 +2660,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
return 0;
err_setup:
- mlx5_vdpa_destroy_mr(mvdev);
+ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
err_mr:
return err;
}
@@ -3548,17 +3556,6 @@ static void mlx5v_remove(struct auxiliary_device *adev)
kfree(mgtdev);
}
-static void mlx5v_shutdown(struct auxiliary_device *auxdev)
-{
- struct mlx5_vdpa_mgmtdev *mgtdev;
- struct mlx5_vdpa_net *ndev;
-
- mgtdev = auxiliary_get_drvdata(auxdev);
- ndev = mgtdev->ndev;
-
- free_irqs(ndev);
-}
-
static const struct auxiliary_device_id mlx5v_id_table[] = {
{ .name = MLX5_ADEV_NAME ".vnet", },
{},
@@ -3570,7 +3567,6 @@ static struct auxiliary_driver mlx5v_driver = {
.name = "vnet",
.probe = mlx5v_probe,
.remove = mlx5v_remove,
- .shutdown = mlx5v_shutdown,
.id_table = mlx5v_id_table,
};
diff --git a/drivers/vdpa/pds/Makefile b/drivers/vdpa/pds/Makefile
index 2e22418e3ab3..c2d314d4614d 100644
--- a/drivers/vdpa/pds/Makefile
+++ b/drivers/vdpa/pds/Makefile
@@ -5,6 +5,5 @@ obj-$(CONFIG_PDS_VDPA) := pds_vdpa.o
pds_vdpa-y := aux_drv.o \
cmds.o \
+ debugfs.o \
vdpa_dev.o
-
-pds_vdpa-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/vdpa/pds/debugfs.c b/drivers/vdpa/pds/debugfs.c
index 21a0dc0cb607..9b04aad6ec35 100644
--- a/drivers/vdpa/pds/debugfs.c
+++ b/drivers/vdpa/pds/debugfs.c
@@ -176,6 +176,7 @@ static int identity_show(struct seq_file *seq, void *v)
{
struct pds_vdpa_aux *vdpa_aux = seq->private;
struct vdpa_mgmt_dev *mgmt;
+ u64 hw_features;
seq_printf(seq, "aux_dev: %s\n",
dev_name(&vdpa_aux->padev->aux_dev.dev));
@@ -183,8 +184,9 @@ static int identity_show(struct seq_file *seq, void *v)
mgmt = &vdpa_aux->vdpa_mdev;
seq_printf(seq, "max_vqs: %d\n", mgmt->max_supported_vqs);
seq_printf(seq, "config_attr_mask: %#llx\n", mgmt->config_attr_mask);
- seq_printf(seq, "supported_features: %#llx\n", mgmt->supported_features);
- print_feature_bits_all(seq, mgmt->supported_features);
+ hw_features = le64_to_cpu(vdpa_aux->ident.hw_features);
+ seq_printf(seq, "hw_features: %#llx\n", hw_features);
+ print_feature_bits_all(seq, hw_features);
return 0;
}
@@ -200,7 +202,6 @@ static int config_show(struct seq_file *seq, void *v)
{
struct pds_vdpa_device *pdsv = seq->private;
struct virtio_net_config vc;
- u64 driver_features;
u8 status;
memcpy_fromio(&vc, pdsv->vdpa_aux->vd_mdev.device,
@@ -223,12 +224,8 @@ static int config_show(struct seq_file *seq, void *v)
status = vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
seq_printf(seq, "dev_status: %#x\n", status);
print_status_bits(seq, status);
-
- seq_printf(seq, "req_features: %#llx\n", pdsv->req_features);
- print_feature_bits_all(seq, pdsv->req_features);
- driver_features = vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev);
- seq_printf(seq, "driver_features: %#llx\n", driver_features);
- print_feature_bits_all(seq, driver_features);
+ seq_printf(seq, "negotiated_features: %#llx\n", pdsv->negotiated_features);
+ print_feature_bits_all(seq, pdsv->negotiated_features);
seq_printf(seq, "vdpa_index: %d\n", pdsv->vdpa_index);
seq_printf(seq, "num_vqs: %d\n", pdsv->num_vqs);
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 5071a4d58f8d..52b2449182ad 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -126,11 +126,9 @@ static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
- struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
struct device *dev = &pdsv->vdpa_dev.dev;
u64 driver_features;
u16 invert_idx = 0;
- int irq;
int err;
dev_dbg(dev, "%s: qid %d ready %d => %d\n",
@@ -143,19 +141,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re
invert_idx = PDS_VDPA_PACKED_INVERT_IDX;
if (ready) {
- irq = pci_irq_vector(pdev, qid);
- snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
- "vdpa-%s-%d", dev_name(dev), qid);
-
- err = request_irq(irq, pds_vdpa_isr, 0,
- pdsv->vqs[qid].irq_name, &pdsv->vqs[qid]);
- if (err) {
- dev_err(dev, "%s: no irq for qid %d: %pe\n",
- __func__, qid, ERR_PTR(err));
- return;
- }
- pdsv->vqs[qid].irq = irq;
-
/* Pass vq setup info to DSC using adminq to gather up and
* send all info at once so FW can do its full set up in
* one easy operation
@@ -164,7 +149,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re
if (err) {
dev_err(dev, "Failed to init vq %d: %pe\n",
qid, ERR_PTR(err));
- pds_vdpa_release_irq(pdsv, qid);
ready = false;
}
} else {
@@ -172,7 +156,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re
if (err)
dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
__func__, qid, ERR_PTR(err));
- pds_vdpa_release_irq(pdsv, qid);
}
pdsv->vqs[qid].ready = ready;
@@ -318,6 +301,7 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur
struct device *dev = &pdsv->vdpa_dev.dev;
u64 driver_features;
u64 nego_features;
+ u64 hw_features;
u64 missing;
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
@@ -325,21 +309,26 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur
return -EOPNOTSUPP;
}
- pdsv->req_features = features;
-
/* Check for valid feature bits */
- nego_features = features & le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
- missing = pdsv->req_features & ~nego_features;
+ nego_features = features & pdsv->supported_features;
+ missing = features & ~nego_features;
if (missing) {
dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
- pdsv->req_features, missing);
+ features, missing);
return -EOPNOTSUPP;
}
+ pdsv->negotiated_features = nego_features;
+
driver_features = pds_vdpa_get_driver_features(vdpa_dev);
dev_dbg(dev, "%s: %#llx => %#llx\n",
__func__, driver_features, nego_features);
+ /* if we're faking the F_MAC, strip it before writing to device */
+ hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
+ if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
+ nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);
+
if (driver_features == nego_features)
return 0;
@@ -352,7 +341,7 @@ static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
- return vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev);
+ return pdsv->negotiated_features;
}
static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
@@ -389,6 +378,72 @@ static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}
+static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
+{
+ struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
+ struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
+ struct device *dev = &pdsv->vdpa_dev.dev;
+ int max_vq, nintrs, qid, err;
+
+ max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;
+
+ nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
+ if (nintrs < 0) {
+ dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
+ max_vq, ERR_PTR(nintrs));
+ return nintrs;
+ }
+
+ for (qid = 0; qid < pdsv->num_vqs; ++qid) {
+ int irq = pci_irq_vector(pdev, qid);
+
+ snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
+ "vdpa-%s-%d", dev_name(dev), qid);
+
+ err = request_irq(irq, pds_vdpa_isr, 0,
+ pdsv->vqs[qid].irq_name,
+ &pdsv->vqs[qid]);
+ if (err) {
+ dev_err(dev, "%s: no irq for qid %d: %pe\n",
+ __func__, qid, ERR_PTR(err));
+ goto err_release;
+ }
+
+ pdsv->vqs[qid].irq = irq;
+ }
+
+ vdpa_aux->nintrs = nintrs;
+
+ return 0;
+
+err_release:
+ while (qid--)
+ pds_vdpa_release_irq(pdsv, qid);
+
+ pci_free_irq_vectors(pdev);
+
+ vdpa_aux->nintrs = 0;
+
+ return err;
+}
+
+static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
+{
+ struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
+ struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
+ int qid;
+
+ if (!vdpa_aux->nintrs)
+ return;
+
+ for (qid = 0; qid < pdsv->num_vqs; qid++)
+ pds_vdpa_release_irq(pdsv, qid);
+
+ pci_free_irq_vectors(pdev);
+
+ vdpa_aux->nintrs = 0;
+}
+
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
@@ -399,6 +454,11 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
old_status = pds_vdpa_get_status(vdpa_dev);
dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);
+ if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if (pds_vdpa_request_irqs(pdsv))
+ status = old_status | VIRTIO_CONFIG_S_FAILED;
+ }
+
pds_vdpa_cmd_set_status(pdsv, status);
/* Note: still working with FW on the need for this reset cmd */
@@ -409,6 +469,8 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
pdsv->vqs[i].avail_idx = 0;
pdsv->vqs[i].used_idx = 0;
}
+
+ pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
}
if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
@@ -418,6 +480,20 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
i, &pdsv->vqs[i].notify_pa);
}
}
+
+ if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
+ pds_vdpa_release_irqs(pdsv);
+}
+
+static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
+ void __iomem *notify)
+{
+ memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
+ pdsv->vqs[qid].qid = qid;
+ pdsv->vqs[qid].pdsv = pdsv;
+ pdsv->vqs[qid].ready = false;
+ pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
+ pdsv->vqs[qid].notify = notify;
}
static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
@@ -441,14 +517,17 @@ static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
if (err)
dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
__func__, i, ERR_PTR(err));
- pds_vdpa_release_irq(pdsv, i);
- memset(&pdsv->vqs[i], 0, sizeof(pdsv->vqs[0]));
- pdsv->vqs[i].ready = false;
}
}
pds_vdpa_set_status(vdpa_dev, 0);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ /* Reset the vq info */
+ for (i = 0; i < pdsv->num_vqs && !err; i++)
+ pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
+ }
+
return 0;
}
@@ -532,7 +611,6 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
struct device *dma_dev;
struct pci_dev *pdev;
struct device *dev;
- u8 mac[ETH_ALEN];
int err;
int i;
@@ -563,7 +641,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
u64 unsupp_features =
- add_config->device_features & ~mgmt->supported_features;
+ add_config->device_features & ~pdsv->supported_features;
if (unsupp_features) {
dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
@@ -614,29 +692,30 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
}
/* Set a mac, either from the user config if provided
- * or set a random mac if default is 00:..:00
+ * or use the device's mac if not 00:..:00
+ * or set a random mac
*/
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
- ether_addr_copy(mac, add_config->net.mac);
- pds_vdpa_cmd_set_mac(pdsv, mac);
+ ether_addr_copy(pdsv->mac, add_config->net.mac);
} else {
struct virtio_net_config __iomem *vc;
vc = pdsv->vdpa_aux->vd_mdev.device;
- memcpy_fromio(mac, vc->mac, sizeof(mac));
- if (is_zero_ether_addr(mac)) {
- eth_random_addr(mac);
- dev_info(dev, "setting random mac %pM\n", mac);
- pds_vdpa_cmd_set_mac(pdsv, mac);
+ memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
+ if (is_zero_ether_addr(pdsv->mac) &&
+ (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
+ eth_random_addr(pdsv->mac);
+ dev_info(dev, "setting random mac %pM\n", pdsv->mac);
}
}
+ pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
for (i = 0; i < pdsv->num_vqs; i++) {
- pdsv->vqs[i].qid = i;
- pdsv->vqs[i].pdsv = pdsv;
- pdsv->vqs[i].irq = VIRTIO_MSI_NO_VECTOR;
- pdsv->vqs[i].notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
- i, &pdsv->vqs[i].notify_pa);
+ void __iomem *notify;
+
+ notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
+ i, &pdsv->vqs[i].notify_pa);
+ pds_vdpa_init_vqs_entry(pdsv, i, notify);
}
pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;
@@ -746,24 +825,19 @@ int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
max_vqs = min_t(u16, dev_intrs, max_vqs);
mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
- vdpa_aux->nintrs = mgmt->max_supported_vqs;
+ vdpa_aux->nintrs = 0;
mgmt->ops = &pds_vdpa_mgmt_dev_ops;
mgmt->id_table = pds_vdpa_id_table;
mgmt->device = dev;
mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);
+
+ /* advertise F_MAC even if the device doesn't */
+ mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);
+
mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
- err = pci_alloc_irq_vectors(pdev, vdpa_aux->nintrs, vdpa_aux->nintrs,
- PCI_IRQ_MSIX);
- if (err < 0) {
- dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
- vdpa_aux->nintrs, ERR_PTR(err));
- return err;
- }
- vdpa_aux->nintrs = err;
-
return 0;
}
diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h
index a1bc37de9537..d984ba24a7da 100644
--- a/drivers/vdpa/pds/vdpa_dev.h
+++ b/drivers/vdpa/pds/vdpa_dev.h
@@ -35,10 +35,11 @@ struct pds_vdpa_device {
struct pds_vdpa_aux *vdpa_aux;
struct pds_vdpa_vq_info vqs[PDS_VDPA_MAX_QUEUES];
- u64 supported_features; /* specified device features */
- u64 req_features; /* features requested by vdpa */
+ u64 supported_features; /* supported device features */
+ u64 negotiated_features; /* negotiated features */
u8 vdpa_index; /* rsvd for future subdevice use */
u8 num_vqs; /* num vqs in use */
+ u8 mac[ETH_ALEN]; /* mac selected when the device was added */
struct vdpa_callback config_cb;
struct notifier_block nb;
};
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 965e32529eb8..a7612e0783b3 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -1247,44 +1247,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
+ [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
+ [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
+ [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
};
static const struct genl_ops vdpa_nl_ops[] = {
{
.cmd = VDPA_CMD_MGMTDEV_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_mgmtdev_get_doit,
.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_NEW,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_dev_add_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = VDPA_CMD_DEV_DEL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_dev_del_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = VDPA_CMD_DEV_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_dev_get_doit,
.dumpit = vdpa_nl_cmd_dev_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_CONFIG_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_dev_config_get_doit,
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_VSTATS_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = vdpa_nl_cmd_dev_stats_get_doit,
.flags = GENL_ADMIN_PERM,
},
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index dc38ed21319d..df7869537ef1 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -935,10 +935,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
{
struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
- spin_lock_irq(&dev->irq_lock);
+ spin_lock_bh(&dev->irq_lock);
if (dev->config_cb.callback)
dev->config_cb.callback(dev->config_cb.private);
- spin_unlock_irq(&dev->irq_lock);
+ spin_unlock_bh(&dev->irq_lock);
}
static void vduse_vq_irq_inject(struct work_struct *work)
@@ -946,10 +946,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
struct vduse_virtqueue *vq = container_of(work,
struct vduse_virtqueue, inject);
- spin_lock_irq(&vq->irq_lock);
+ spin_lock_bh(&vq->irq_lock);
if (vq->ready && vq->cb.callback)
vq->cb.callback(vq->cb.private);
- spin_unlock_irq(&vq->irq_lock);
+ spin_unlock_bh(&vq->irq_lock);
}
static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c83f7f043470..abef0619c790 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -25,6 +25,8 @@
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
+#include <linux/blk_types.h>
+#include <linux/bio.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -75,6 +77,9 @@ struct vhost_scsi_cmd {
u32 tvc_prot_sgl_count;
/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
u32 tvc_lun;
+ u32 copied_iov:1;
+ const void *saved_iter_addr;
+ struct iov_iter saved_iter;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
struct scatterlist *tvc_prot_sgl;
@@ -328,8 +333,13 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
int i;
if (tv_cmd->tvc_sgl_count) {
- for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
- put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
+ if (tv_cmd->copied_iov)
+ __free_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ else
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ }
+ kfree(tv_cmd->saved_iter_addr);
}
if (tv_cmd->tvc_prot_sgl_count) {
for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
@@ -504,6 +514,28 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
mutex_unlock(&vq->mutex);
}
+static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
+{
+ struct iov_iter *iter = &cmd->saved_iter;
+ struct scatterlist *sg = cmd->tvc_sgl;
+ struct page *page;
+ size_t len;
+ int i;
+
+ for (i = 0; i < cmd->tvc_sgl_count; i++) {
+ page = sg_page(&sg[i]);
+ len = sg[i].length;
+
+ if (copy_page_to_iter(page, 0, len, iter) != len) {
+ pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
+ len);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
@@ -527,15 +559,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
cmd, se_cmd->residual_count, se_cmd->scsi_status);
-
memset(&v_rsp, 0, sizeof(v_rsp));
- v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
- /* TODO is status_qualifier field needed? */
- v_rsp.status = se_cmd->scsi_status;
- v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
- se_cmd->scsi_sense_length);
- memcpy(v_rsp.sense, cmd->tvc_sense_buf,
- se_cmd->scsi_sense_length);
+
+ if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
+ v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->residual_count);
+ /* TODO is status_qualifier field needed? */
+ v_rsp.status = se_cmd->scsi_status;
+ v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->scsi_sense_length);
+ memcpy(v_rsp.sense, cmd->tvc_sense_buf,
+ se_cmd->scsi_sense_length);
+ }
iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
@@ -613,12 +650,12 @@ static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
struct iov_iter *iter,
struct scatterlist *sgl,
- bool write)
+ bool is_prot)
{
struct page **pages = cmd->tvc_upages;
struct scatterlist *sg = sgl;
- ssize_t bytes;
- size_t offset;
+ ssize_t bytes, mapped_bytes;
+ size_t offset, mapped_offset;
unsigned int npages = 0;
bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
@@ -627,13 +664,53 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
+ mapped_bytes = bytes;
+ mapped_offset = offset;
+
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+ /*
+ * The block layer requires bios/requests to be a multiple of
+ * 512 bytes, but Windows can send us vecs that are misaligned.
+ * This can result in bios and later requests with misaligned
+ * sizes if we have to break up a cmd/scatterlist into multiple
+ * bios.
+ *
+ * We currently only break up a command into multiple bios if
+ * we hit the vec/seg limit, so check if our sgl_count is
+ * greater than the max and if a vec in the cmd has a
+ * misaligned offset/size.
+ */
+ if (!is_prot &&
+ (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
+ cmd->tvc_sgl_count > BIO_MAX_VECS) {
+ WARN_ONCE(true,
+ "vhost-scsi detected misaligned IO. Performance may be degraded.");
+ goto revert_iter_get_pages;
+ }
+
sg_set_page(sg++, pages[npages++], n, offset);
bytes -= n;
offset = 0;
}
+
return npages;
+
+revert_iter_get_pages:
+ iov_iter_revert(iter, mapped_bytes);
+
+ npages = 0;
+ while (mapped_bytes) {
+ unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
+ mapped_bytes);
+
+ put_page(pages[npages++]);
+
+ mapped_bytes -= n;
+ mapped_offset = 0;
+ }
+
+ return -EINVAL;
}
static int
@@ -657,25 +734,80 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
}
static int
-vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
- struct iov_iter *iter,
- struct scatterlist *sg, int sg_count)
+vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ struct scatterlist *sg, int sg_count)
+{
+ size_t len = iov_iter_count(iter);
+ unsigned int nbytes = 0;
+ struct page *page;
+ int i;
+
+ if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
+ cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
+ GFP_KERNEL);
+ if (!cmd->saved_iter_addr)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ i--;
+ goto err;
+ }
+
+ nbytes = min_t(unsigned int, PAGE_SIZE, len);
+ sg_set_page(&sg[i], page, nbytes, 0);
+
+ if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
+ copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
+ goto err;
+
+ len -= nbytes;
+ }
+
+ cmd->copied_iov = 1;
+ return 0;
+
+err:
+ pr_err("Could not read %u bytes while handling misaligned cmd\n",
+ nbytes);
+
+ for (; i >= 0; i--)
+ __free_page(sg_page(&sg[i]));
+ kfree(cmd->saved_iter_addr);
+ return -ENOMEM;
+}
+
+static int
+vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ struct scatterlist *sg, int sg_count, bool is_prot)
{
struct scatterlist *p = sg;
+ size_t revert_bytes;
int ret;
while (iov_iter_count(iter)) {
- ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
if (ret < 0) {
+ revert_bytes = 0;
+
while (p < sg) {
- struct page *page = sg_page(p++);
- if (page)
+ struct page *page = sg_page(p);
+
+ if (page) {
put_page(page);
+ revert_bytes += p->length;
+ }
+ p++;
}
+
+ iov_iter_revert(iter, revert_bytes);
return ret;
}
sg += ret;
}
+
return 0;
}
@@ -685,7 +817,6 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
size_t data_bytes, struct iov_iter *data_iter)
{
int sgl_count, ret;
- bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
if (prot_bytes) {
sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
@@ -698,9 +829,9 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
- ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
- cmd->tvc_prot_sgl,
- cmd->tvc_prot_sgl_count);
+ ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
+ cmd->tvc_prot_sgl,
+ cmd->tvc_prot_sgl_count, true);
if (ret < 0) {
cmd->tvc_prot_sgl_count = 0;
return ret;
@@ -716,8 +847,14 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
cmd->tvc_sgl, cmd->tvc_sgl_count);
- ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
- cmd->tvc_sgl, cmd->tvc_sgl_count);
+ ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
+ cmd->tvc_sgl_count, false);
+ if (ret == -EINVAL) {
+ sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
+ ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
+ cmd->tvc_sgl_count);
+ }
+
if (ret < 0) {
cmd->tvc_sgl_count = 0;
return ret;
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 835f6cc2fb66..fa5226c198cc 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -38,11 +38,6 @@ module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
"Big Block size in bytes. Default is 0 (auto-detection).");
-static bool bbm_safe_unplug = true;
-module_param(bbm_safe_unplug, bool, 0444);
-MODULE_PARM_DESC(bbm_safe_unplug,
- "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
-
/*
* virtio-mem currently supports the following modes of operation:
*
@@ -173,6 +168,13 @@ struct virtio_mem {
/* The number of subblocks per Linux memory block. */
uint32_t sbs_per_mb;
+ /*
+ * Some of the Linux memory blocks tracked as "partially
+ * plugged" are completely unplugged and can be offlined
+ * and removed -- which previously failed.
+ */
+ bool have_unplugged_mb;
+
/* Summary of all memory block states. */
unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
@@ -746,11 +748,15 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
* immediately instead of waiting.
*/
virtio_mem_retry(vm);
- } else {
- dev_dbg(&vm->vdev->dev,
- "offlining and removing memory failed: %d\n", rc);
+ return 0;
}
- return rc;
+ dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc);
+ /*
+ * We don't really expect this to fail, because we fake-offlined all
+ * memory already. But it could fail in corner cases.
+ */
+ WARN_ON_ONCE(rc != -ENOMEM && rc != -EBUSY);
+ return rc == -ENOMEM ? -ENOMEM : -EBUSY;
}
/*
@@ -767,6 +773,34 @@ static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
}
/*
+ * Try (offlining and) removing memory from Linux in case all subblocks are
+ * unplugged. Can be called on online and offline memory blocks.
+ *
+ * May modify the state of memory blocks in virtio-mem.
+ */
+static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ int rc;
+
+ /*
+ * Once all subblocks of a memory block were unplugged, offline and
+ * remove it.
+ */
+ if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
+ return 0;
+
+ /* offline_and_remove_memory() works for online and offline memory. */
+ mutex_unlock(&vm->hotplug_mutex);
+ rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
+ mutex_lock(&vm->hotplug_mutex);
+ if (!rc)
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
+ return rc;
+}
+
+/*
* See virtio_mem_offline_and_remove_memory(): Try to offline and remove
* all Linux memory blocks covered by the big block.
*/
@@ -1155,7 +1189,8 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
* Try to allocate a range, marking pages fake-offline, effectively
* fake-offlining them.
*/
-static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
+static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
+ unsigned long nr_pages)
{
const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
@@ -1168,6 +1203,14 @@ static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
* some guarantees.
*/
for (retry_count = 0; retry_count < 5; retry_count++) {
+ /*
+ * If the config changed, stop immediately and go back to the
+ * main loop: avoid trying to keep unplugging if the device
+ * might have decided to not remove any more memory.
+ */
+ if (atomic_read(&vm->config_changed))
+ return -EAGAIN;
+
rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
GFP_KERNEL);
if (rc == -ENOMEM)
@@ -1917,7 +1960,7 @@ static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->sbm.sb_size);
- rc = virtio_mem_fake_offline(start_pfn, nr_pages);
+ rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);
if (rc)
return rc;
@@ -1989,20 +2032,10 @@ static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
}
unplugged:
- /*
- * Once all subblocks of a memory block were unplugged, offline and
- * remove it. This will usually not fail, as no memory is in use
- * anymore - however some other notifiers might NACK the request.
- */
- if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
- mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
- mutex_lock(&vm->hotplug_mutex);
- if (!rc)
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_UNUSED);
- }
-
+ rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id);
+ if (rc)
+ vm->sbm.have_unplugged_mb = 1;
+ /* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
@@ -2111,38 +2144,32 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
VIRTIO_MEM_BBM_BB_ADDED))
return -EINVAL;
- if (bbm_safe_unplug) {
- /*
- * Start by fake-offlining all memory. Once we marked the device
- * block as fake-offline, all newly onlined memory will
- * automatically be kept fake-offline. Protect from concurrent
- * onlining/offlining until we have a consistent state.
- */
- mutex_lock(&vm->hotplug_mutex);
- virtio_mem_bbm_set_bb_state(vm, bb_id,
- VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
+ /*
+ * Start by fake-offlining all memory. Once we marked the device
+ * block as fake-offline, all newly onlined memory will
+ * automatically be kept fake-offline. Protect from concurrent
+ * onlining/offlining until we have a consistent state.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- page = pfn_to_online_page(pfn);
- if (!page)
- continue;
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
- rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
- if (rc) {
- end_pfn = pfn;
- goto rollback_safe_unplug;
- }
+ rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
+ if (rc) {
+ end_pfn = pfn;
+ goto rollback;
}
- mutex_unlock(&vm->hotplug_mutex);
}
+ mutex_unlock(&vm->hotplug_mutex);
rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
if (rc) {
- if (bbm_safe_unplug) {
- mutex_lock(&vm->hotplug_mutex);
- goto rollback_safe_unplug;
- }
- return rc;
+ mutex_lock(&vm->hotplug_mutex);
+ goto rollback;
}
rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
@@ -2154,7 +2181,7 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
VIRTIO_MEM_BBM_BB_UNUSED);
return rc;
-rollback_safe_unplug:
+rollback:
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
page = pfn_to_online_page(pfn);
if (!page)
@@ -2260,12 +2287,13 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
/*
* Try to unplug all blocks that couldn't be unplugged before, for example,
- * because the hypervisor was busy.
+ * because the hypervisor was busy. Further, offline and remove any memory
+ * blocks where we previously failed.
*/
-static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
+static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
{
unsigned long id;
- int rc;
+ int rc = 0;
if (!vm->in_sbm) {
virtio_mem_bbm_for_each_bb(vm, id,
@@ -2287,6 +2315,27 @@ static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
VIRTIO_MEM_SBM_MB_UNUSED);
}
+ if (!vm->sbm.have_unplugged_mb)
+ return 0;
+
+ /*
+ * Let's retry (offlining and) removing completely unplugged Linux
+ * memory blocks.
+ */
+ vm->sbm.have_unplugged_mb = false;
+
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
+ rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
+ mutex_unlock(&vm->hotplug_mutex);
+
+ if (rc)
+ vm->sbm.have_unplugged_mb = true;
+ /* Ignore errors, this is not critical. We'll retry later. */
return 0;
}
@@ -2368,9 +2417,9 @@ retry:
virtio_mem_refresh_config(vm);
}
- /* Unplug any leftovers from previous runs */
+ /* Cleanup any leftovers from previous runs */
if (!rc)
- rc = virtio_mem_unplug_pending_mb(vm);
+ rc = virtio_mem_cleanup_pending_mb(vm);
if (!rc && vm->requested_size != vm->plugged_size) {
if (vm->requested_size > vm->plugged_size) {
@@ -2382,6 +2431,13 @@ retry:
}
}
+ /*
+ * Keep retrying to offline and remove completely unplugged Linux
+ * memory blocks.
+ */
+ if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb)
+ rc = -EBUSY;
+
switch (rc) {
case 0:
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index a46a4a29e929..97760f611295 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -607,9 +607,8 @@ static void virtio_mmio_release_dev(struct device *_d)
struct virtio_device *vdev =
container_of(_d, struct virtio_device, dev);
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- struct platform_device *pdev = vm_dev->pdev;
- devm_kfree(&pdev->dev, vm_dev);
+ kfree(vm_dev);
}
/* Platform device */
@@ -620,7 +619,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
unsigned long magic;
int rc;
- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index a6c86f916dbd..c2524a7207cf 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -557,8 +557,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;
-
rc = register_virtio_device(&vp_dev->vdev);
reg_dev = vp_dev;
if (rc)
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 2257f1b3d8ae..d9cbb02b35a1 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -223,6 +223,7 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->is_legacy = true;
return 0;
}
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 989e2d7184ce..961161da5900 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -393,11 +393,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
+ kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
+ kfree(masks);
return err;
}